language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_layout04.py | {
"start": 315,
"end": 1602
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_layout04.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with user defined layout."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [68311296, 69198208]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_title(
{
"name": "Title",
"layout": {
"x": 0.42631933508311465,
"y": 0.14351851851851852,
},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | tools/code_coverage/package/tool/parser/llvm_coverage_segment.py | {
"start": 68,
"end": 1975
} | class ____(NamedTuple):
line: int
col: int
segment_count: int
has_count: int
is_region_entry: int
is_gap_entry: int | None
@property
def has_coverage(self) -> bool:
return self.segment_count > 0
@property
def is_executable(self) -> bool:
return self.has_count > 0
def get_coverage(
self, prev_segment: LlvmCoverageSegment
) -> tuple[list[int], list[int]]:
# Code adapted from testpilot.testinfra.runners.gtestcoveragerunner.py
if not prev_segment.is_executable:
return [], []
# this segment ends at the line if col == 1
# (so segment effectively ends on the line) and
# line+1 if col is > 1 (so it touches at least some part of last line).
end_of_segment = self.line if self.col == 1 else self.line + 1
lines_range = list(range(prev_segment.line, end_of_segment))
return (lines_range, []) if prev_segment.has_coverage else ([], lines_range)
def parse_segments(raw_segments: list[list[int]]) -> list[LlvmCoverageSegment]:
"""
Creates LlvmCoverageSegment from a list of lists in llvm export json.
each segment is represented by 5-element array.
"""
ret: list[LlvmCoverageSegment] = []
for raw_segment in raw_segments:
assert len(raw_segment) == 5 or len(raw_segment) == 6, (
"list is not compatible with llvmcom export:"
)
" Expected to have 5 or 6 elements"
if len(raw_segment) == 5:
ret.append(
LlvmCoverageSegment(
raw_segment[0],
raw_segment[1],
raw_segment[2],
raw_segment[3],
raw_segment[4],
None,
)
)
else:
ret.append(LlvmCoverageSegment(*raw_segment))
return ret
| LlvmCoverageSegment |
python | scipy__scipy | scipy/sparse/linalg/_isolve/tests/test_iterative.py | {
"start": 20302,
"end": 26265
} | class ____:
def test_basic(self):
A = np.vander(np.arange(10) + 1)[:, ::-1]
b = np.zeros(10)
b[0] = 1
x_gm, err = gmres(A, b, restart=5, maxiter=1)
assert_allclose(x_gm[0], 0.359, rtol=1e-2)
@pytest.mark.filterwarnings(f"ignore:{CB_TYPE_FILTER}:DeprecationWarning")
def test_callback(self):
def store_residual(r, rvec):
rvec[rvec.nonzero()[0].max() + 1] = r
# Define, A,b
A = csr_array(array([[-2, 1, 0, 0, 0, 0],
[1, -2, 1, 0, 0, 0],
[0, 1, -2, 1, 0, 0],
[0, 0, 1, -2, 1, 0],
[0, 0, 0, 1, -2, 1],
[0, 0, 0, 0, 1, -2]]))
b = ones((A.shape[0],))
maxiter = 1
rvec = zeros(maxiter + 1)
rvec[0] = 1.0
def callback(r):
return store_residual(r, rvec)
x, flag = gmres(A, b, x0=zeros(A.shape[0]), rtol=1e-16,
maxiter=maxiter, callback=callback)
# Expected output from SciPy 1.0.0
assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)
# Test preconditioned callback
M = 1e-3 * np.eye(A.shape[0])
rvec = zeros(maxiter + 1)
rvec[0] = 1.0
x, flag = gmres(A, b, M=M, rtol=1e-16, maxiter=maxiter,
callback=callback)
# Expected output from SciPy 1.0.0
# (callback has preconditioned residual!)
assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]),
rtol=1e-10)
def test_abi(self):
# Check we don't segfault on gmres with complex argument
A = eye(2)
b = ones(2)
r_x, r_info = gmres(A, b)
r_x = r_x.astype(complex)
x, info = gmres(A.astype(complex), b.astype(complex))
assert iscomplexobj(x)
assert_allclose(r_x, x)
assert r_info == info
@pytest.mark.fail_slow(10)
def test_atol_legacy(self):
A = eye(2)
b = ones(2)
x, info = gmres(A, b, rtol=1e-5)
assert np.linalg.norm(A @ x - b) <= 1e-5 * np.linalg.norm(b)
assert_allclose(x, b, atol=0, rtol=1e-8)
rndm = np.random.RandomState(12345)
A = rndm.rand(30, 30)
b = 1e-6 * ones(30)
x, info = gmres(A, b, rtol=1e-7, restart=20)
assert np.linalg.norm(A @ x - b) > 1e-7
A = eye(2)
b = 1e-10 * ones(2)
x, info = gmres(A, b, rtol=1e-8, atol=0)
assert np.linalg.norm(A @ x - b) <= 1e-8 * np.linalg.norm(b)
def test_defective_precond_breakdown(self):
# Breakdown due to defective preconditioner
M = np.eye(3)
M[2, 2] = 0
b = np.array([0, 1, 1])
x = np.array([1, 0, 0])
A = np.diag([2, 3, 4])
x, info = gmres(A, b, x0=x, M=M, rtol=1e-15, atol=0)
# Should not return nans, nor terminate with false success
assert not np.isnan(x).any()
if info == 0:
assert np.linalg.norm(A @ x - b) <= 1e-15 * np.linalg.norm(b)
# The solution should be OK outside null space of M
assert_allclose(M @ (A @ x), M @ b)
def test_defective_matrix_breakdown(self):
# Breakdown due to defective matrix
A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
b = np.array([1, 0, 1])
rtol = 1e-8
x, info = gmres(A, b, rtol=rtol, atol=0)
# Should not return nans, nor terminate with false success
assert not np.isnan(x).any()
if info == 0:
assert np.linalg.norm(A @ x - b) <= rtol * np.linalg.norm(b)
# The solution should be OK outside null space of A
assert_allclose(A @ (A @ x), A @ b)
@pytest.mark.filterwarnings(f"ignore:{CB_TYPE_FILTER}:DeprecationWarning")
def test_callback_type(self):
# The legacy callback type changes meaning of 'maxiter'
np.random.seed(1)
A = np.random.rand(20, 20)
b = np.random.rand(20)
cb_count = [0]
def pr_norm_cb(r):
cb_count[0] += 1
assert isinstance(r, float)
def x_cb(x):
cb_count[0] += 1
assert isinstance(x, np.ndarray)
# 2 iterations is not enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
maxiter=2, restart=50)
assert info == 2
assert cb_count[0] == 2
# With `callback_type` specified, no warning should be raised
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
maxiter=2, restart=50, callback_type='legacy')
assert info == 2
assert cb_count[0] == 2
# 2 restart cycles is enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
maxiter=2, restart=50, callback_type='pr_norm')
assert info == 0
assert cb_count[0] > 2
# 2 restart cycles is enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=x_cb, maxiter=2,
restart=50, callback_type='x')
assert info == 0
assert cb_count[0] == 1
def test_callback_x_monotonic(self):
# Check that callback_type='x' gives monotonic norm decrease
rng = np.random.RandomState(1)
A = rng.rand(20, 20) + np.eye(20)
b = rng.rand(20)
prev_r = [np.inf]
count = [0]
def x_cb(x):
r = np.linalg.norm(A @ x - b)
assert r <= prev_r[0]
prev_r[0] = r
count[0] += 1
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=x_cb, maxiter=20,
restart=10, callback_type='x')
assert info == 20
assert count[0] == 20
| TestGMRES |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 14323,
"end": 14833
} | class ____(AttributeHandler):
minimum_path_length = 2
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
if path[1] in ("channel", "runtime_version", "update_id"):
contexts = event.data.get("contexts", {})
ota_updates_context = contexts.get("ota_updates")
if ota_updates_context is None:
ota_updates_context = {}
return [ota_updates_context.get(path[1])]
return []
| ExpoUpdatesAttributeHandler |
python | google__jax | jax/_src/debugger/cli_debugger.py | {
"start": 796,
"end": 4778
} | class ____(cmd.Cmd):
"""A text-based debugger."""
prompt = '(jdb) '
def __init__(self, frames: list[DebuggerFrame], thread_id,
stdin: IO[str] | None = None, stdout: IO[str] | None = None,
completekey: str = "tab"):
super().__init__(stdin=stdin, stdout=stdout, completekey=completekey)
self.use_rawinput = stdin is None
self.frames = frames
self.frame_index = 0
self.thread_id = thread_id
self.intro = 'Entering jdb:'
def current_frame(self):
return self.frames[self.frame_index]
def evaluate(self, expr):
env = {}
curr_frame = self.frames[self.frame_index]
env.update(curr_frame.globals)
env.update(curr_frame.locals)
return eval(expr, {}, env)
def default(self, arg):
"""Evaluates an expression."""
try:
print(repr(self.evaluate(arg)), file=self.stdout)
except:
self._error_message()
def print_backtrace(self):
backtrace = []
backtrace.append('Traceback:')
for frame in self.frames[::-1]:
backtrace.append(f' File "{frame.filename}", line {frame.lineno}')
if frame.offset is None:
backtrace.append(' <no source>')
else:
line = frame.source[frame.offset]
backtrace.append(f' {line.strip()}')
print("\n".join(backtrace), file=self.stdout)
def print_context(self, num_lines=2):
curr_frame = self.frames[self.frame_index]
context = []
context.append(f'> {curr_frame.filename}({curr_frame.lineno})')
for i, line in enumerate(curr_frame.source):
assert curr_frame.offset is not None
if (curr_frame.offset - 1 - num_lines <= i <=
curr_frame.offset + num_lines):
if i == curr_frame.offset:
context.append(f'-> {line}')
else:
context.append(f' {line}')
print("\n".join(context), file=self.stdout)
def _error_message(self):
exc_info = sys.exc_info()[:2]
msg = traceback.format_exception_only(*exc_info)[-1].strip()
print('***', msg, file=self.stdout)
def do_p(self, arg):
"""p expression
Evaluates and prints the value of an expression
"""
try:
print(repr(self.evaluate(arg)), file=self.stdout)
except:
self._error_message()
def do_pp(self, arg):
"""pp expression
Evaluates and pretty-prints the value of an expression
"""
try:
print(pprint.pformat(self.evaluate(arg)), file=self.stdout)
except:
self._error_message()
def do_up(self, _):
"""u(p)
Move up a stack frame.
"""
if self.frame_index == len(self.frames) - 1:
print('At topmost frame.', file=self.stdout)
else:
self.frame_index += 1
self.print_context()
do_u = do_up
def do_down(self, _):
"""d(own)
Move down a stack frame.
"""
if self.frame_index == 0:
print('At bottommost frame.', file=self.stdout)
else:
self.frame_index -= 1
self.print_context()
do_d = do_down
def do_list(self, _):
"""l(ist)
List source code for the current file.
"""
self.print_context(num_lines=5)
do_l = do_list
def do_continue(self, _):
"""c(ont(inue))
Continue the program's execution.
"""
return True
do_c = do_cont = do_continue
def do_quit(self, _):
"""q(uit)\n(exit)
Quit the debugger. The program is given an exit command.
"""
sys.exit(0)
do_q = do_EOF = do_exit = do_quit
def do_where(self, _):
"""w(here)
Prints a stack trace with the most recent frame on the bottom.
'bt' is an alias for this command.
"""
self.print_backtrace()
do_w = do_bt = do_where
def run(self):
while True:
try:
self.cmdloop()
break
except KeyboardInterrupt:
print('--KeyboardInterrupt--', file=sys.stdout)
def run_debugger(frames: list[DebuggerFrame], thread_id: int | None,
**kwargs: Any):
CliDebugger(frames, thread_id, **kwargs).run()
debugger_core.register_debugger("cli", run_debugger, -1)
| CliDebugger |
python | getsentry__sentry | src/social_auth/exceptions.py | {
"start": 1657,
"end": 1835
} | class ____(AuthException):
"""Auth token error."""
def __str__(self) -> str:
msg = super().__str__()
return gettext("Token error: %s") % msg
| AuthTokenError |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 4394,
"end": 15747
} | class ____:
def test_simple(self):
a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
w, v = eig(a)
exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
v0 = array([1, 1, (1+sqrt(93)/3)/2])
v1 = array([3., 0, -1])
v2 = array([1, 1, (1-sqrt(93)/3)/2])
v0 = v0 / norm(v0)
v1 = v1 / norm(v1)
v2 = v2 / norm(v2)
assert_array_almost_equal(w, exact_w)
assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
for i in range(3):
assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
w, v = eig(a, left=1, right=0)
for i in range(3):
assert_array_almost_equal(a.T @ v[:, i], w[i]*v[:, i])
def test_simple_complex_eig(self):
a = array([[1, 2], [-2, 1]])
w, vl, vr = eig(a, left=1, right=1)
assert_array_almost_equal(w, array([1+2j, 1-2j]))
for i in range(2):
assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
for i in range(2):
assert_array_almost_equal(a.conj().T @ vl[:, i],
w[i].conj()*vl[:, i])
def test_simple_complex(self):
a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
w, vl, vr = eig(a, left=1, right=1)
for i in range(3):
assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
for i in range(3):
assert_array_almost_equal(a.conj().T @ vl[:, i],
w[i].conj()*vl[:, i])
def test_gh_3054(self):
a = [[1]]
b = [[0]]
w, vr = eig(a, b, homogeneous_eigvals=True)
assert_allclose(w[1, 0], 0)
assert_(w[0, 0] != 0)
assert_allclose(vr, 1)
w, vr = eig(a, b)
assert_equal(w, np.inf)
assert_allclose(vr, 1)
def _check_gen_eig(self, A, B, atol_homog=1e-13, rtol_homog=1e-13,
atol=1e-13, rtol=1e-13):
if B is not None:
A, B = asarray(A), asarray(B)
B0 = B
else:
A = asarray(A)
B0 = B
B = np.eye(*A.shape)
msg = f"\n{A!r}\n{B!r}"
# Eigenvalues in homogeneous coordinates
w, vr = eig(A, B0, homogeneous_eigvals=True)
wt = eigvals(A, B0, homogeneous_eigvals=True)
val1 = A @ vr * w[1, :]
val2 = B @ vr * w[0, :]
for i in range(val1.shape[1]):
assert_allclose(val1[:, i], val2[:, i],
rtol=rtol_homog, atol=atol_homog, err_msg=msg)
if B0 is None:
assert_allclose(w[1, :], 1)
assert_allclose(wt[1, :], 1)
perm = np.lexsort(w)
permt = np.lexsort(wt)
assert_allclose(w[:, perm], wt[:, permt], atol=1e-7, rtol=1e-7,
err_msg=msg)
length = np.empty(len(vr))
for i in range(len(vr)):
length[i] = norm(vr[:, i])
assert_allclose(length, np.ones(length.size), err_msg=msg,
atol=1e-7, rtol=1e-7)
# Convert homogeneous coordinates
beta_nonzero = (w[1, :] != 0)
wh = w[0, beta_nonzero] / w[1, beta_nonzero]
# Eigenvalues in standard coordinates
w, vr = eig(A, B0)
wt = eigvals(A, B0)
val1 = A @ vr
val2 = B @ vr * w
res = val1 - val2
for i in range(res.shape[1]):
if np.all(isfinite(res[:, i])):
assert_allclose(res[:, i], 0,
rtol=rtol, atol=atol, err_msg=msg)
# try to consistently order eigenvalues, including complex conjugate pairs
w_fin = w[isfinite(w)]
wt_fin = wt[isfinite(wt)]
# prune noise in the real parts
w_fin = -1j * np.real_if_close(1j*w_fin, tol=1e-10)
wt_fin = -1j * np.real_if_close(1j*wt_fin, tol=1e-10)
perm = argsort(abs(w_fin) + w_fin.imag)
permt = argsort(abs(wt_fin) + wt_fin.imag)
assert_allclose(w_fin[perm], wt_fin[permt],
atol=1e-7, rtol=1e-7, err_msg=msg)
length = np.empty(len(vr))
for i in range(len(vr)):
length[i] = norm(vr[:, i])
assert_allclose(length, np.ones(length.size), err_msg=msg)
# Compare homogeneous and nonhomogeneous versions
assert_allclose(sort(wh), sort(w[np.isfinite(w)]))
def test_singular(self):
# Example taken from
# https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
A = array([[22, 34, 31, 31, 17],
[45, 45, 42, 19, 29],
[39, 47, 49, 26, 34],
[27, 31, 26, 21, 15],
[38, 44, 44, 24, 30]])
B = array([[13, 26, 25, 17, 24],
[31, 46, 40, 26, 37],
[26, 40, 19, 25, 25],
[16, 25, 27, 14, 23],
[24, 35, 18, 21, 22]])
with np.errstate(all='ignore'):
self._check_gen_eig(A, B, atol_homog=5e-13, atol=5e-13)
def test_falker(self):
# Test matrices giving some Nan generalized eigenvalues.
M = diag(array([1, 0, 3]))
K = array(([2, -1, -1], [-1, 2, -1], [-1, -1, 2]))
D = array(([1, -1, 0], [-1, 1, 0], [0, 0, 0]))
Z = zeros((3, 3))
I3 = eye(3)
A = np.block([[I3, Z], [Z, -K]])
B = np.block([[Z, I3], [M, D]])
with np.errstate(all='ignore'):
self._check_gen_eig(A, B)
def test_bad_geneig(self):
# Ticket #709 (strange return values from DGGEV)
def matrices(omega):
c1 = -9 + omega**2
c2 = 2*omega
A = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, c1, 0],
[0, 0, 0, c1]]
B = [[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, -c2],
[0, 1, c2, 0]]
return A, B
# With a buggy LAPACK, this can fail for different omega on different
# machines -- so we need to test several values
with np.errstate(all='ignore'):
for k in range(100):
A, B = matrices(omega=k*5./100)
self._check_gen_eig(A, B)
def test_make_eigvals(self):
# Step through all paths in _make_eigvals
# Real eigenvalues
rng = np.random.RandomState(1234)
A = symrand(3, rng)
self._check_gen_eig(A, None)
B = symrand(3, rng)
self._check_gen_eig(A, B)
# Complex eigenvalues
A = rng.random((3, 3)) + 1j*rng.random((3, 3))
self._check_gen_eig(A, None)
B = rng.random((3, 3)) + 1j*rng.random((3, 3))
self._check_gen_eig(A, B)
def test_check_finite(self):
a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
w, v = eig(a, check_finite=False)
exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
v0 = array([1, 1, (1+sqrt(93)/3)/2])
v1 = array([3., 0, -1])
v2 = array([1, 1, (1-sqrt(93)/3)/2])
v0 = v0 / norm(v0)
v1 = v1 / norm(v1)
v2 = v2 / norm(v2)
assert_array_almost_equal(w, exact_w)
assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
for i in range(3):
assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
def test_not_square_error(self):
"""Check that passing a non-square array raises a ValueError."""
A = np.arange(6).reshape(3, 2)
assert_raises(ValueError, eig, A)
def test_shape_mismatch(self):
"""Check that passing arrays of with different shapes
raises a ValueError."""
A = eye(2)
B = np.arange(9.0).reshape(3, 3)
assert_raises(ValueError, eig, A, B)
assert_raises(ValueError, eig, B, A)
def test_gh_11577(self):
# https://github.com/scipy/scipy/issues/11577
# `A - lambda B` should have 4 and 8 among the eigenvalues, and this
# was apparently broken on some platforms
A = np.array([[12.0, 28.0, 76.0, 220.0],
[16.0, 32.0, 80.0, 224.0],
[24.0, 40.0, 88.0, 232.0],
[40.0, 56.0, 104.0, 248.0]], dtype='float64')
B = np.array([[2.0, 4.0, 10.0, 28.0],
[3.0, 5.0, 11.0, 29.0],
[5.0, 7.0, 13.0, 31.0],
[9.0, 11.0, 17.0, 35.0]], dtype='float64')
D, V = eig(A, B)
# The problem is ill-conditioned, and two other eigenvalues
# depend on ATLAS/OpenBLAS version, compiler version etc
# see gh-11577 for discussion
#
# NB: it is tempting to use `assert_allclose(D[:2], [4, 8])` instead but
# the ordering of eigenvalues also comes out different on different
# systems depending on who knows what.
with warnings.catch_warnings():
# isclose chokes on inf/nan values
warnings.filterwarnings(
"ignore", "invalid value encountered in multiply", RuntimeWarning)
assert np.isclose(D, 4.0, atol=1e-14).any()
assert np.isclose(D, 8.0, atol=1e-14).any()
@pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64])
def test_empty(self, dt):
a = np.empty((0, 0), dtype=dt)
w, vr = eig(a)
w_n, vr_n = eig(np.eye(2, dtype=dt))
assert w.shape == (0,)
assert w.dtype == w_n.dtype #eigvals(np.eye(2, dtype=dt)).dtype
assert_allclose(vr, np.empty((0, 0)))
assert vr.shape == (0, 0)
assert vr.dtype == vr_n.dtype
w, vr = eig(a, homogeneous_eigvals=True)
assert w.shape == (2, 0)
assert w.dtype == w_n.dtype
assert vr.shape == (0, 0)
assert vr.dtype == vr_n.dtype
@pytest.mark.parametrize("include_B", [False, True])
@pytest.mark.parametrize("left", [False, True])
@pytest.mark.parametrize("right", [False, True])
@pytest.mark.parametrize("homogeneous_eigvals", [False, True])
@pytest.mark.parametrize("dtype", [np.float32, np.complex128])
def test_nd_input(self, include_B, left, right, homogeneous_eigvals, dtype):
batch_shape = (3, 2)
core_shape = (4, 4)
rng = np.random.default_rng(3249823598235)
A = rng.random(batch_shape + core_shape).astype(dtype)
B = rng.random(batch_shape + core_shape).astype(dtype)
kwargs = dict(right=right, homogeneous_eigvals=homogeneous_eigvals)
if include_B:
res = eig(A, b=B, left=left, **kwargs)
else:
res = eig(A, left=left, **kwargs)
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
if include_B:
ref = eig(A[i, j], b=B[i, j], left=left, **kwargs)
else:
ref = eig(A[i, j], left=left, **kwargs)
if left or right:
for k in range(len(ref)):
assert_allclose(res[k][i, j], ref[k])
else:
assert_allclose(res[i, j], ref)
| TestEig |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 23699,
"end": 24067
} | class ____(themeable):
"""
How to align the plot title and plot subtitle
Parameters
----------
theme_element : Literal["panel", "plot"], default = "panel"
If "panel", the title / subtitle are aligned with respect
to the panels. If "plot", they are aligned with the plot,
excluding the margin space
"""
| plot_title_position |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_management.py | {
"start": 21244,
"end": 37839
} | class ____(object):
"""Manages multiple checkpoints by keeping some and deleting unneeded ones.
Example usage:
```python
import tensorflow as tf
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint, directory="/tmp/model", max_to_keep=5)
status = checkpoint.restore(manager.latest_checkpoint)
while True:
# train
manager.save()
```
`CheckpointManager` preserves its own state across instantiations (see the
`__init__` documentation for details). Only one should be active in a
particular directory at a time.
"""
def __init__(
self,
checkpoint,
directory,
max_to_keep,
keep_checkpoint_every_n_hours=None,
checkpoint_name="ckpt",
step_counter=None,
checkpoint_interval=None,
init_fn=None,
last_checkpoint_step=None,
):
"""Configure a `CheckpointManager` for use in `directory`.
If a `CheckpointManager` was previously used in `directory`, its
state will be restored. This includes the list of managed checkpoints and
the timestamp bookkeeping necessary to support
`keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`
will be the same as the previous `CheckpointManager`, including cleaning up
existing checkpoints if appropriate.
Checkpoints are only considered for deletion just after a new checkpoint has
been added. At that point, `max_to_keep` checkpoints will remain in an
"active set". Once a checkpoint is preserved by
`keep_checkpoint_every_n_hours` it will not be deleted by this
`CheckpointManager` or any future `CheckpointManager` instantiated in
`directory` (regardless of the new setting of
`keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the
active set may be deleted by this `CheckpointManager` or a future
`CheckpointManager` instantiated in `directory` (subject to its
`max_to_keep` and `keep_checkpoint_every_n_hours` settings).
`CheckpointManager` can be also used for initializing the model if
there is no checkpoints for restoring in `directory`. An example usage is:
>>> import tempfile
>>> tmp_dir = tempfile.mkdtemp()
>>> checkpoint = tf.train.Checkpoint()
>>> init_path = checkpoint.save(os.path.join(tmp_dir, 'init'))
>>> def init_fn():
... # Partially restore the checkpoint from `init_path`.
... checkpoint.restore(init_path)
>>> manager = tf.train.CheckpointManager(
... checkpoint,
... directory=os.path.join(tmp_dir, 'ckpt'),
... max_to_keep=None,
... init_fn=init_fn)
>>> # `restore_or_initialize` will call `init_fn` if there is no existing
>>> # checkpoint in `directory`.
>>> manager.restore_or_initialize()
Args:
checkpoint: The `tf.train.Checkpoint` instance to save and manage
checkpoints for.
directory: The path to a directory in which to write checkpoints. A
special file named "checkpoint" is also written to this directory (in a
human-readable text format) which contains the state of the
`CheckpointManager`.
max_to_keep: An integer, the number of checkpoints to keep. Unless
preserved by `keep_checkpoint_every_n_hours`, checkpoints will be
deleted from the active set, oldest first, until only `max_to_keep`
checkpoints remain. If `None`, no checkpoints are deleted and everything
stays in the active set. Note that `max_to_keep=None` will keep all
checkpoint paths in memory and in the checkpoint state protocol buffer
on disk.
keep_checkpoint_every_n_hours: Upon removal from the active set, a
checkpoint will be preserved if it has been at least
`keep_checkpoint_every_n_hours` since the last preserved checkpoint. The
default setting of `None` does not preserve any checkpoints in this way.
checkpoint_name: Custom name for the checkpoint file.
step_counter: A `tf.Variable` instance for checking the current step
counter value, in case users want to save checkpoints every N steps. It
should be passed if `checkpoint_interval` is not None.
checkpoint_interval: An integer, indicates the minimum step interval
between two checkpoints.
init_fn: Callable. A function to do customized initialization if no
checkpoints are in the directory.
last_checkpoint_step: An integer, indicating the step number of the last
checkpoint saved. This will be used as the starting point for checking
checkpoint_interval against the current step. If None, the last
checkpoint step will be set to None.
Raises:
ValueError: If `max_to_keep` is not a positive integer.
"""
self._checkpoint = checkpoint
self._save_counter_assign = None
if max_to_keep is not None and max_to_keep <= 0:
raise ValueError(
("Expected a positive integer or `None` for `max_to_keep`, "
"got %d.")
% (max_to_keep,))
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
if isinstance(directory, os.PathLike):
directory = os.fspath(directory)
self._directory = directory
self._checkpoint_prefix = os.path.join(directory, checkpoint_name)
self._init_fn = init_fn
if checkpoint_interval is not None:
if step_counter is None:
raise ValueError("`step_counter` should be passed if "
"`checkpoint_interval` is not None.")
self._last_checkpoint_step = last_checkpoint_step
self._step_counter = step_counter
self._checkpoint_interval = checkpoint_interval
recovered_state = get_checkpoint_state(directory)
current_clock = time.time()
self._maybe_delete = collections.OrderedDict()
if recovered_state is None:
self._latest_checkpoint = None
# Set the clock back slightly to avoid race conditions when quickly
# re-creating a CheckpointManager.
self._last_preserved_timestamp = current_clock - 1.
else:
self._latest_checkpoint = recovered_state.model_checkpoint_path
self._last_preserved_timestamp = recovered_state.last_preserved_timestamp
if current_clock < self._last_preserved_timestamp:
# Time seems to have reversed itself. In addition to this warning, we'll
# min() saved checkpoint timestamps with the current time to ensure that
# old checkpoints don't get deleted accidentally.
logging.warning(
("time.time() returned a value %f seconds behind the last "
"preserved checkpoint timestamp.")
% (self._last_preserved_timestamp - current_clock,))
self._last_preserved_timestamp = current_clock
all_timestamps = recovered_state.all_model_checkpoint_timestamps
all_paths = recovered_state.all_model_checkpoint_paths
del recovered_state # Uses modified values from now on
if not all_timestamps:
all_timestamps = [self._last_preserved_timestamp] * len(all_paths)
for filename, timestamp in zip(all_paths, all_timestamps):
timestamp = min(timestamp, current_clock)
if timestamp > self._last_preserved_timestamp:
self._maybe_delete[filename] = timestamp
@property
def directory(self):
return self._directory
@property
def checkpoint_interval(self):
return self._checkpoint_interval
@property
def latest_checkpoint(self):
"""The prefix of the most recent checkpoint in `directory`.
Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is
the constructor argument to `CheckpointManager`.
Suitable for passing to `tf.train.Checkpoint.restore` to resume training.
Returns:
The checkpoint prefix. If there are no checkpoints, returns `None`.
"""
return self._latest_checkpoint
@property
def checkpoints(self):
"""A list of managed checkpoints.
Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not
show up in this list (to avoid ever-growing filename lists).
Returns:
A list of filenames, sorted from oldest to newest.
"""
return list(self._maybe_delete.keys())
def _sweep(self):
"""Deletes or preserves managed checkpoints."""
if not self._max_to_keep:
# Does not update self._last_preserved_timestamp, since everything is kept
# in the active set.
return
while len(self._maybe_delete) > self._max_to_keep:
filename, timestamp = self._maybe_delete.popitem(last=False)
# Even if we're keeping this checkpoint due to
# keep_checkpoint_every_n_hours, we won't reference it to avoid
# infinitely-growing CheckpointState protos.
if (self._keep_checkpoint_every_n_hours
and (timestamp - self._keep_checkpoint_every_n_hours * 3600.
>= self._last_preserved_timestamp)):
self._last_preserved_timestamp = timestamp
continue
_delete_file_if_exists(filename + ".index")
_delete_file_if_exists(filename + ".data-?????-of-?????")
def _record_state(self):
"""Saves the `CheckpointManager`'s state in `directory`."""
filenames, timestamps = zip(*self._maybe_delete.items())
update_checkpoint_state_internal(
self._directory,
model_checkpoint_path=self.latest_checkpoint,
all_model_checkpoint_paths=filenames,
all_model_checkpoint_timestamps=timestamps,
last_preserved_timestamp=self._last_preserved_timestamp,
save_relative_paths=True)
@property
def _prefix(self):
"""A common prefix for all checkpoints saved with this manager.
For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
`prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
checkpoint has several associated files
(e.g. `"/tmp/tf-model/ckpt-2.index"`).
Returns:
A string prefix.
"""
return self._checkpoint_prefix
@property
def checkpoint(self):
"""Returns the `tf.train.Checkpoint` object."""
return self._checkpoint
def save(self, checkpoint_number=None, check_interval=True, options=None):
"""Creates a new checkpoint and manages it.
Args:
checkpoint_number: An optional integer, or an integer-dtype `Variable` or
`Tensor`, used to number the checkpoint. If `None` (default),
checkpoints are numbered using `checkpoint.save_counter`. Even if
`checkpoint_number` is provided, `save_counter` is still incremented. A
user-provided `checkpoint_number` is not incremented even if it is a
`Variable`.
check_interval: An optional boolean. The argument is only effective when
`checkpoint_interval` is passed into the manager. If `True`, the manager
will only save the checkpoint if the interval between checkpoints is
larger than `checkpoint_interval`. Otherwise it will always save the
checkpoint unless a checkpoint has already been saved for the current
step.
options: Optional `tf.train.CheckpointOptions` object. This argument only
works with TF2 checkpoint objects. For example, options =
tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')
Returns:
The path to the new checkpoint. It is also recorded in the `checkpoints`
and `latest_checkpoint` properties. `None` if no checkpoint is saved.
"""
if self._checkpoint_interval is not None:
current_step = _evaluate(self._step_counter)
if self._last_checkpoint_step is not None:
if current_step == self._last_checkpoint_step:
return None
if check_interval and current_step < (
self._last_checkpoint_step + self._checkpoint_interval):
return None
self._last_checkpoint_step = current_step
# Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
# slightly with a custom numbering option.
if context.executing_eagerly():
save_counter = self._checkpoint.save_counter
save_counter.assign_add(1)
session = None
else:
session = ops.get_default_session()
def _initializing_creator(next_creator, **kwargs):
"""Initialize the save counter if it has been newly created."""
v = next_creator(**kwargs)
session.run(v.initializer)
return v
with variable_scope.variable_creator_scope(_initializing_creator):
save_counter = self._checkpoint.save_counter
if self._save_counter_assign is None:
self._save_counter_assign = save_counter.assign_add(1, read_value=False)
session.run(self._save_counter_assign)
if checkpoint_number is None:
checkpoint_number = save_counter
if not isinstance(checkpoint_number, compat.integral_types):
checkpoint_number = training_util.global_step(
sess=session, global_step_tensor=checkpoint_number)
prefix = "%s-%d" % (self._prefix, checkpoint_number)
def _record_and_sweep_state(save_path):
timestamp = time.time()
# If this is an overwritten checkpoint we were previously tracking, delete
# and reinsert it to make sure it goes to the end of the queue.
if save_path in self._maybe_delete:
del self._maybe_delete[save_path]
self._maybe_delete[save_path] = timestamp
self._latest_checkpoint = save_path
# Before deleting anything we update the Checkpoint proto with the new
# checkpoint. We'll go back and correct it after cleaning up old files,
# but a preemption while deleting will be more likely to see the new
# checkpoint this way.
self._record_state()
self._sweep()
# Write out the Checkpoint proto a second time, now without the deleted
# checkpoints.
self._record_state()
# Register `_record_and_sweep_state` as a callback in `CheckpointOptions`
if options is None:
options = checkpoint_options.CheckpointOptions(
experimental_write_callbacks=[_record_and_sweep_state]
)
else:
# We create a copy so that user's `options` instance would not be mutated
# by internal mechanisms.
options = copy.copy(options)
if options.experimental_write_callbacks is None:
options.experimental_write_callbacks = [_record_and_sweep_state]
else:
options.experimental_write_callbacks.append(_record_and_sweep_state)
save_path = self._checkpoint._write(prefix, options=options) # pylint: disable=protected-access
return save_path
def restore_or_initialize(self):
"""Restore items in `checkpoint` from the latest checkpoint file.
This method will first try to restore from the most recent checkpoint in
`directory`. If no checkpoints exist in `directory`, and `init_fn` is
specified, this method will call `init_fn` to do customized
initialization. This can be used to support initialization from pretrained
models.
Note that unlike `tf.train.Checkpoint.restore()`, this method doesn't return
a load status object that users can run assertions on
(e.g. assert_consumed()). Thus to run assertions, users should directly use
`tf.train.Checkpoint.restore()` method.
Returns:
The restored checkpoint path if the latest checkpoint is found and
restored. Otherwise None.
"""
# TODO(chienchunh): When AsyncCheckpoint is used, we may need to force to
# sync until any ongoing async save is done. Otherwise, if this is the first
# checkpoint and _latest_checkpoint has not been updated due to async write,
# this would resort to init_fn instead of restoring from the checkpoin file.
# This should be fixed once AsyncCheckpoint is integrated with the public
# API so that we can rely on CheckpointOptions to tell whether we should
# sync for AsyncCheckpoint.
if self._latest_checkpoint is not None:
self._checkpoint.restore(self._latest_checkpoint)
if self._checkpoint_interval is not None:
self._last_checkpoint_step = _evaluate(self._step_counter)
return self._latest_checkpoint
if self._init_fn is not None:
self._init_fn()
logging.info(
"Customized initialization is done through the passed `init_fn`.")
return None
def sync(self):
"""Wait for any outstanding save or restore operations."""
if self._checkpoint:
self._checkpoint.sync()
| CheckpointManager |
python | wandb__wandb | wandb/vendor/promise-2.3.0/tests/test_extra.py | {
"start": 905,
"end": 14574
} | class ____:
def __init__(self, raises=True):
self.raises = raises
def then(self, s=None, f=None):
if self.raises:
raise Exception("FakeThenPromise raises in 'then'")
def df(value, dtime):
p = Promise()
t = DelayedFulfill(dtime, p, value)
t.start()
return p
def dr(reason, dtime):
p = Promise()
t = DelayedRejection(dtime, p, reason)
t.start()
return p
# Static methods
def test_fulfilled():
p = Promise.fulfilled(4)
assert p.is_fulfilled
assert p.get() == 4
def test_rejected():
p = Promise.rejected(Exception("Static rejected"))
assert p.is_rejected
with raises(Exception) as exc_info:
p.get()
assert str(exc_info.value) == "Static rejected"
# Fulfill
def test_fulfill_self():
p = Promise()
with raises(TypeError) as excinfo:
p.do_resolve(p)
p.get()
# Exceptions
def test_exceptions():
def throws(v):
assert False
p1 = Promise()
p1.then(throws)
p1.do_resolve(5)
p2 = Promise()
p2.catch(throws)
p2.do_reject(Exception())
with raises(Exception) as excinfo:
p2.get()
def test_thrown_exceptions_have_stacktrace():
def throws(v):
assert False
p3 = Promise.resolve("a").then(throws)
with raises(AssertionError) as assert_exc:
p3.get()
assert assert_exc.traceback[-1].path.strpath == __file__
def test_thrown_exceptions_preserve_stacktrace():
def throws(v):
assert False
def after_throws(v):
pass
p3 = Promise.resolve("a").then(throws).then(after_throws)
with raises(AssertionError) as assert_exc:
p3.get()
assert assert_exc.traceback[-1].path.strpath == __file__
# WAIT
# def test_wait_when():
# p1 = df(5, 0.01)
# assert p1.is_pending
# p1._wait()
# assert p1.is_fulfilled
def test_wait_if():
p1 = Promise()
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
# def test_wait_timeout():
# p1 = df(5, 0.1)
# assert p1.is_pending
# with raises(Exception) as exc_info:
# p1._wait(timeout=0.05)
# assert str(exc_info.value) == "Timeout"
# assert p1.is_pending
# p1._wait()
# assert p1.is_fulfilled
# # GET
# def test_get_when():
# p1 = df(5, 0.01)
# assert p1.is_pending
# v = p1.get()
# assert p1.is_fulfilled
# assert 5 == v
def test_get_if():
p1 = Promise()
p1.do_resolve(5)
v = p1.get()
assert p1.is_fulfilled
assert 5 == v
# def test_get_timeout():
# p1 = df(5, 0.1)
# assert p1.is_pending
# with raises(Exception) as exc_info:
# p1._wait(timeout=0.05)
# assert str(exc_info.value) == "Timeout"
# assert p1.is_pending
# v = p1.get()
# assert p1.is_fulfilled
# assert 5 == v
# Promise.all
def test_promise_all_when():
p1 = Promise()
p2 = Promise()
pl = Promise.all([p1, p2])
assert p1.is_pending
assert p2.is_pending
assert pl.is_pending
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pl.is_pending
p2.do_resolve(10)
p2._wait()
pl._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pl.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert 5 == pl.get()[0]
assert 10 == pl.get()[1]
def test_promise_all_when_mixed_promises():
p1 = Promise()
p2 = Promise()
pl = Promise.all([p1, 32, p2, False, True])
assert p1.is_pending
assert p2.is_pending
assert pl.is_pending
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pl.is_pending
p2.do_resolve(10)
p2._wait()
pl._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pl.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert pl.get() == [5, 32, 10, False, True]
def test_promise_all_when_if_no_promises():
pl = Promise.all([10, 32, False, True])
assert pl.get() == [10, 32, False, True]
def test_promise_all_if():
p1 = Promise()
p2 = Promise()
pd1 = Promise.all([p1, p2])
pd2 = Promise.all([p1])
pd3 = Promise.all([])
pd3._wait()
assert p1.is_pending
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_pending
assert pd3.is_fulfilled
p1.do_resolve(5)
p1._wait()
pd2._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_fulfilled
p2.do_resolve(10)
p2._wait()
pd1._wait()
pd2._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd1.is_fulfilled
assert pd2.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert 5 == pd1.get()[0]
assert 5 == pd2.get()[0]
assert 10 == pd1.get()[1]
assert [] == pd3.get()
# promise_for_dict
@fixture(params=[Promise.for_dict, free_promise_for_dict])
def promise_for_dict(request):
return request.param
def test_dict_promise_when(promise_for_dict):
p1 = Promise()
p2 = Promise()
d = {"a": p1, "b": p2}
pd1 = promise_for_dict(d)
pd2 = promise_for_dict({"a": p1})
pd3 = promise_for_dict({})
assert p1.is_pending
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_pending
pd3._wait()
assert pd3.is_fulfilled
p1.do_resolve(5)
p1._wait()
pd2._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_fulfilled
p2.do_resolve(10)
p2._wait()
pd1._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd1.is_fulfilled
assert pd2.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert 5 == pd1.get()["a"]
assert 5 == pd2.get()["a"]
assert 10 == pd1.get()["b"]
assert {} == pd3.get()
def test_dict_promise_if(promise_for_dict):
p1 = Promise()
p2 = Promise()
d = {"a": p1, "b": p2}
pd = promise_for_dict(d)
assert p1.is_pending
assert p2.is_pending
assert pd.is_pending
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pd.is_pending
p2.do_resolve(10)
p2._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
# pd._wait()
# assert pd.is_fulfilled
# assert 5 == p1.get()
# assert 10 == p2.get()
# assert 5 == pd.get()["a"]
# assert 10 == pd.get()["b"]
def test_done():
counter = [0]
r = Promise()
def inc(_):
counter[0] += 1
def dec(_):
counter[0] -= 1
def end(_):
r.do_resolve(None)
p = Promise()
p.done(inc, dec)
p.done(inc, dec)
p.done(end)
p.do_resolve(4)
Promise.wait(r)
assert counter[0] == 2
r = Promise()
counter = [0]
p = Promise()
p.done(inc, dec)
p.done(inc, dec)
p.done(None, end)
p.do_reject(Exception())
Promise.wait(r)
assert counter[0] == -2
def test_done_all():
counter = [0]
def inc(_):
counter[0] += 1
def dec(_):
counter[0] -= 1
p = Promise()
r = Promise()
p.done_all()
p.done_all([(inc, dec)])
p.done_all(
[
(inc, dec),
(inc, dec),
{"success": inc, "failure": dec},
lambda _: r.do_resolve(None),
]
)
p.do_resolve(4)
Promise.wait(r)
assert counter[0] == 4
p = Promise()
r = Promise()
p.done_all()
p.done_all([inc])
p.done_all([(inc, dec)])
p.done_all(
[
(inc, dec),
{"success": inc, "failure": dec},
(None, lambda _: r.do_resolve(None)),
]
)
p.do_reject(Exception("Uh oh!"))
Promise.wait(r)
assert counter[0] == 1
def test_then_all():
p = Promise()
handlers = [
((lambda x: x * x), (lambda r: 1)),
{"success": (lambda x: x + x), "failure": (lambda r: 2)},
]
results = (
p.then_all()
+ p.then_all([lambda x: x])
+ p.then_all([(lambda x: x * x, lambda r: 1)])
+ p.then_all(handlers)
)
p.do_resolve(4)
assert [r.get() for r in results] == [4, 16, 16, 8]
p = Promise()
handlers = [
((lambda x: x * x), (lambda r: 1)),
{"success": (lambda x: x + x), "failure": (lambda r: 2)},
]
results = (
p.then_all()
+ p.then_all([(lambda x: x * x, lambda r: 1)])
+ p.then_all(handlers)
)
p.do_reject(Exception())
assert [r.get() for r in results] == [1, 1, 2]
def test_do_resolve():
p1 = Promise(lambda resolve, reject: resolve(0))
assert p1.get() == 0
assert p1.is_fulfilled
def test_do_resolve_fail_on_call():
def raises(resolve, reject):
raise Exception("Fails")
p1 = Promise(raises)
assert not p1.is_fulfilled
assert str(p1.reason) == "Fails"
def test_catch():
p1 = Promise(lambda resolve, reject: resolve(0))
p2 = p1.then(lambda value: 1 / value).catch(lambda e: e).then(lambda e: type(e))
assert p2.get() == ZeroDivisionError
assert p2.is_fulfilled
def test_is_thenable_promise():
promise = Promise()
assert is_thenable(promise)
def test_is_thenable_then_object():
promise = FakeThenPromise()
assert not is_thenable(promise)
def test_is_thenable_future():
promise = Future()
assert is_thenable(promise)
def test_is_thenable_simple_object():
assert not is_thenable(object())
@fixture(params=[Promise.resolve])
def resolve(request):
return request.param
def test_resolve_promise(resolve):
promise = Promise()
assert resolve(promise) == promise
def test_resolve_then_object(resolve):
promise = FakeThenPromise(raises=False)
p = resolve(promise)
assert isinstance(p, Promise)
def test_resolve_future(resolve):
future = Future()
promise = resolve(future)
assert promise.is_pending
future.set_result(1)
assert promise.get() == 1
assert promise.is_fulfilled
def test_resolve_future_rejected(resolve):
future = Future()
promise = resolve(future)
assert promise.is_pending
future.set_exception(Exception("Future rejected"))
assert promise.is_rejected
assert_exception(promise.reason, Exception, "Future rejected")
def test_resolve_object(resolve):
val = object()
promised = resolve(val)
assert isinstance(promised, Promise)
assert promised.get() == val
def test_resolve_promise_subclass():
class MyPromise(Promise):
pass
p = Promise()
p.do_resolve(10)
m_p = MyPromise.resolve(p)
assert isinstance(m_p, MyPromise)
assert m_p.get() == p.get()
def test_promise_repr_pending():
promise = Promise()
assert repr(promise) == "<Promise at {} pending>".format(hex(id(promise)))
def test_promise_repr_pending():
val = {1: 2}
promise = Promise.fulfilled(val)
promise._wait()
assert repr(promise) == "<Promise at {} fulfilled with {}>".format(
hex(id(promise)), repr(val)
)
def test_promise_repr_fulfilled():
val = {1: 2}
promise = Promise.fulfilled(val)
promise._wait()
assert repr(promise) == "<Promise at {} fulfilled with {}>".format(
hex(id(promise)), repr(val)
)
def test_promise_repr_rejected():
err = Exception("Error!")
promise = Promise.rejected(err)
promise._wait()
assert repr(promise) == "<Promise at {} rejected with {}>".format(
hex(id(promise)), repr(err)
)
def test_promise_loop():
def by_two(result):
return result * 2
def executor(resolve, reject):
resolve(Promise.resolve(1).then(lambda v: Promise.resolve(v).then(by_two)))
p = Promise(executor)
assert p.get(.1) == 2
def test_resolve_future_like(resolve):
class CustomThenable(object):
def add_done_callback(self, f):
f(True)
def done(self):
return True
def exception(self):
pass
def result(self):
return True
instance = CustomThenable()
promise = resolve(instance)
assert promise.get() == True
def sum_function(a, b):
return a + b
def test_promisify_function_resolved(resolve):
promisified_func = promisify(sum_function)
result = promisified_func(1, 2)
assert isinstance(result, Promise)
assert result.get() == 3
def test_promisify_function_rejected(resolve):
promisified_func = promisify(sum_function)
result = promisified_func(None, None)
assert isinstance(result, Promise)
with raises(Exception) as exc_info_promise:
result.get()
with raises(Exception) as exc_info:
sum_function(None, None)
assert str(exc_info_promise.value) == str(exc_info.value)
def test_promises_with_only_then():
context = {"success": False}
error = RuntimeError("Ooops!")
promise1 = Promise(
lambda resolve, reject: context.update({"promise1_reject": reject})
)
promise2 = promise1.then(lambda x: None)
promise3 = promise1.then(lambda x: None)
context["promise1_reject"](error)
promise2._wait()
promise3._wait()
assert promise2.reason == error
assert promise3.reason == error
def test_promises_promisify_still_works_but_deprecated_for_non_callables():
x = promisify(1)
assert isinstance(x, Promise)
assert x.get() == 1
# def test_promise_loop():
# values = Promise.resolve([1, None, 2])
# def on_error(error):
# error
# def executor(resolve, reject):
# resolve(Promise.resolve(values).then(lambda values: Promise.all([Promise.resolve(values[0])]).catch(on_error)))
# p = Promise(executor)
# assert p.get(.1) == 2
| FakeThenPromise |
python | joblib__joblib | joblib/memory.py | {
"start": 33811,
"end": 34897
} | class ____(MemorizedFunc):
async def __call__(self, *args, **kwargs):
out = self._cached_call(args, kwargs, shelving=False)
out = await out if asyncio.iscoroutine(out) else out
return out[0] # Don't return metadata
async def call_and_shelve(self, *args, **kwargs):
out = self._cached_call(args, kwargs, shelving=True)
out = await out if asyncio.iscoroutine(out) else out
return out[0] # Don't return metadata
async def call(self, *args, **kwargs):
out = super().call(*args, **kwargs)
return await out if asyncio.iscoroutine(out) else out
async def _call(self, call_id, args, kwargs, shelving=False):
self._before_call(args, kwargs)
start_time = time.time()
output = await self.func(*args, **kwargs)
return self._after_call(call_id, args, kwargs, shelving, output, start_time)
###############################################################################
# class `Memory`
###############################################################################
| AsyncMemorizedFunc |
python | huggingface__transformers | src/transformers/models/textnet/image_processing_textnet_fast.py | {
"start": 1376,
"end": 5491
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 640}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = False
do_rescale = True
do_normalize = True
do_convert_rgb = True
size_divisor = 32
valid_kwargs = TextNetImageProcessorKwargs
def __init__(self, **kwargs: Unpack[TextNetImageProcessorKwargs]) -> None:
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[TextNetImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
size_divisor: int = 32,
**kwargs,
) -> "torch.Tensor":
if size.shortest_edge:
new_size = get_resize_output_image_size(
image,
size=size.shortest_edge,
default_to_square=False,
input_data_format=ChannelDimension.FIRST,
)
else:
raise ValueError(f"Size must contain 'shortest_edge' key. Got {size}.")
# ensure height and width are divisible by size_divisor
height, width = new_size
if height % size_divisor != 0:
height += size_divisor - (height % size_divisor)
if width % size_divisor != 0:
width += size_divisor - (width % size_divisor)
return super().resize(
image, SizeDict(height=height, width=width), interpolation=interpolation, antialias=antialias
)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
size_divisor: int,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(
image=stacked_images, size=size, interpolation=interpolation, size_divisor=size_divisor
)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["TextNetImageProcessorFast"]
| TextNetImageProcessorFast |
python | ray-project__ray | python/ray/autoscaler/v2/instance_manager/subscribers/cloud_instance_updater.py | {
"start": 393,
"end": 3207
} | class ____(InstanceUpdatedSubscriber):
"""CloudInstanceUpdater is responsible for launching
new instances and terminating cloud instances
It requests the cloud instance provider to launch new instances when
there are new instance requests (with REQUESTED status change).
It requests the cloud instance provider to terminate instances when
there are new instance terminations (with TERMINATING status change).
The cloud instance APIs are async and non-blocking.
"""
def __init__(
self,
cloud_provider: ICloudInstanceProvider,
) -> None:
self._cloud_provider = cloud_provider
def notify(self, events: List[InstanceUpdateEvent]) -> None:
new_requests = [
event for event in events if event.new_instance_status == Instance.REQUESTED
]
new_terminations = [
event
for event in events
if event.new_instance_status == Instance.TERMINATING
]
self._launch_new_instances(new_requests)
self._terminate_instances(new_terminations)
def _terminate_instances(self, new_terminations: List[InstanceUpdateEvent]):
"""
Terminate cloud instances through cloud provider.
Args:
new_terminations: List of new instance terminations.
"""
if not new_terminations:
logger.debug("No instances to terminate.")
return
# Terminate the instances.
cloud_instance_ids = [event.cloud_instance_id for event in new_terminations]
# This is an async call.
self._cloud_provider.terminate(
ids=cloud_instance_ids, request_id=str(uuid.uuid4())
)
def _launch_new_instances(self, new_requests: List[InstanceUpdateEvent]):
"""
Launches new instances by requesting the cloud provider.
Args:
new_requests: List of new instance requests.
"""
if not new_requests:
logger.debug("No instances to launch.")
return
# Group new requests by launch request id.
requests_by_launch_request_id = defaultdict(list)
for event in new_requests:
assert (
event.launch_request_id
), "Launch request id should have been set by the reconciler"
requests_by_launch_request_id[event.launch_request_id].append(event)
for launch_request_id, events in requests_by_launch_request_id.items():
request_shape = defaultdict(int)
for event in events:
request_shape[event.instance_type] += 1
# Make requests to the cloud provider.
self._cloud_provider.launch(
shape=request_shape, request_id=launch_request_id
)
| CloudInstanceUpdater |
python | spack__spack | lib/spack/spack/platforms/__init__.py | {
"start": 855,
"end": 1627
} | class ____:
"""Class used to pickle a callable that may substitute either
_platform or _all_platforms. Lambda or nested functions are
not pickleable.
"""
def __init__(self, return_value):
self.return_value = return_value
def __call__(self):
return self.return_value
@contextlib.contextmanager
def use_platform(new_platform):
global host
import spack.config
assert isinstance(new_platform, Platform), f'"{new_platform}" must be an instance of Platform'
original_host_fn = host
try:
host = _PickleableCallable(new_platform)
spack.config.CONFIG.clear_caches()
yield new_platform
finally:
host = original_host_fn
spack.config.CONFIG.clear_caches()
| _PickleableCallable |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/layout.py | {
"start": 19013,
"end": 21627
} | class ____(LayoutOperatorBase):
"""Operator for torch.expand() operation."""
def __init__(self):
"""Initialize ExpandOperator."""
super().__init__("expand")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.expand"
def can_produce(self, output_spec: Spec) -> bool:
"""Expand can produce any tensor output."""
if not isinstance(output_spec, TensorSpec):
return False
# Expand can produce any tensor with at least one dimension
return len(output_spec.size) > 0
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input spec for expand operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ExpandOperator can only produce TensorSpec outputs")
# torch.expand() broadcasts a tensor to a new shape
# For expand to work, each dimension of the input must either:
# 1. Match the corresponding output dimension
# 2. Be 1 (to be broadcasted)
# 3. Not exist (input can have fewer dimensions than output)
# Generate input size with same or fewer dimensions
output_size = output_spec.size
input_ndim = random.randint(1, len(output_size))
# Create input size by choosing dimensions to broadcast
input_size = []
for i in range(input_ndim):
output_dim_idx = len(output_size) - input_ndim + i
output_dim = output_size[output_dim_idx]
# Randomly choose to either match the output dimension or use 1 for broadcasting
# Use 1 with higher probability to test broadcasting behavior
if random.random() < 0.6 and output_dim > 1:
input_size.append(1)
else:
input_size.append(output_dim)
input_size = tuple(input_size)
# Create input tensor spec
from torchfuzz.tensor_fuzzer import fuzz_valid_stride
input_stride = fuzz_valid_stride(input_size)
return [
TensorSpec(size=input_size, stride=input_stride, dtype=output_spec.dtype)
]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for expand operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ExpandOperator can only produce TensorSpec outputs")
shape_str = str(list(output_spec.size))
return f"{output_name} = {input_names[0]}.expand({shape_str})"
| ExpandOperator |
python | pypa__pipenv | pipenv/patched/pip/_internal/commands/cache.py | {
"start": 517,
"end": 8197
} | class ____(Command):
"""
Inspect and manage pip's wheel cache.
Subcommands:
- dir: Show the cache directory.
- info: Show information about the cache.
- list: List filenames of packages stored in the cache.
- remove: Remove one or more package from the cache.
- purge: Remove all items from the cache.
``<pattern>`` can be a glob expression or a package name.
"""
ignore_require_venv = True
usage = """
%prog dir
%prog info
%prog list [<pattern>] [--format=[human, abspath]]
%prog remove <pattern>
%prog purge
"""
def add_options(self) -> None:
self.cmd_opts.add_option(
"--format",
action="store",
dest="list_format",
default="human",
choices=("human", "abspath"),
help="Select the output format among: human (default) or abspath",
)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: List[str]) -> int:
handlers = {
"dir": self.get_cache_dir,
"info": self.get_cache_info,
"list": self.list_cache_items,
"remove": self.remove_cache_items,
"purge": self.purge_cache,
}
if not options.cache_dir:
logger.error("pip cache commands can not function since cache is disabled.")
return ERROR
# Determine action
if not args or args[0] not in handlers:
logger.error(
"Need an action (%s) to perform.",
", ".join(sorted(handlers)),
)
return ERROR
action = args[0]
# Error handling happens here, not in the action-handlers.
try:
handlers[action](options, args[1:])
except PipError as e:
logger.error(e.args[0])
return ERROR
return SUCCESS
def get_cache_dir(self, options: Values, args: List[Any]) -> None:
if args:
raise CommandError("Too many arguments")
logger.info(options.cache_dir)
def get_cache_info(self, options: Values, args: List[Any]) -> None:
if args:
raise CommandError("Too many arguments")
num_http_files = len(self._find_http_files(options))
num_packages = len(self._find_wheels(options, "*"))
http_cache_location = self._cache_dir(options, "http-v2")
old_http_cache_location = self._cache_dir(options, "http")
wheels_cache_location = self._cache_dir(options, "wheels")
http_cache_size = filesystem.format_size(
filesystem.directory_size(http_cache_location)
+ filesystem.directory_size(old_http_cache_location)
)
wheels_cache_size = filesystem.format_directory_size(wheels_cache_location)
message = (
textwrap.dedent(
"""
Package index page cache location (pip v23.3+): {http_cache_location}
Package index page cache location (older pips): {old_http_cache_location}
Package index page cache size: {http_cache_size}
Number of HTTP files: {num_http_files}
Locally built wheels location: {wheels_cache_location}
Locally built wheels size: {wheels_cache_size}
Number of locally built wheels: {package_count}
""" # noqa: E501
)
.format(
http_cache_location=http_cache_location,
old_http_cache_location=old_http_cache_location,
http_cache_size=http_cache_size,
num_http_files=num_http_files,
wheels_cache_location=wheels_cache_location,
package_count=num_packages,
wheels_cache_size=wheels_cache_size,
)
.strip()
)
logger.info(message)
def list_cache_items(self, options: Values, args: List[Any]) -> None:
if len(args) > 1:
raise CommandError("Too many arguments")
if args:
pattern = args[0]
else:
pattern = "*"
files = self._find_wheels(options, pattern)
if options.list_format == "human":
self.format_for_human(files)
else:
self.format_for_abspath(files)
def format_for_human(self, files: List[str]) -> None:
if not files:
logger.info("No locally built wheels cached.")
return
results = []
for filename in files:
wheel = os.path.basename(filename)
size = filesystem.format_file_size(filename)
results.append(f" - {wheel} ({size})")
logger.info("Cache contents:\n")
logger.info("\n".join(sorted(results)))
def format_for_abspath(self, files: List[str]) -> None:
if files:
logger.info("\n".join(sorted(files)))
def remove_cache_items(self, options: Values, args: List[Any]) -> None:
if len(args) > 1:
raise CommandError("Too many arguments")
if not args:
raise CommandError("Please provide a pattern")
files = self._find_wheels(options, args[0])
no_matching_msg = "No matching packages"
if args[0] == "*":
# Only fetch http files if no specific pattern given
files += self._find_http_files(options)
else:
# Add the pattern to the log message
no_matching_msg += f' for pattern "{args[0]}"'
if not files:
logger.warning(no_matching_msg)
bytes_removed = 0
for filename in files:
bytes_removed += os.stat(filename).st_size
os.unlink(filename)
logger.verbose("Removed %s", filename)
logger.info("Files removed: %s (%s)", len(files), format_size(bytes_removed))
def purge_cache(self, options: Values, args: List[Any]) -> None:
if args:
raise CommandError("Too many arguments")
return self.remove_cache_items(options, ["*"])
def _cache_dir(self, options: Values, subdir: str) -> str:
return os.path.join(options.cache_dir, subdir)
def _find_http_files(self, options: Values) -> List[str]:
old_http_dir = self._cache_dir(options, "http")
new_http_dir = self._cache_dir(options, "http-v2")
return filesystem.find_files(old_http_dir, "*") + filesystem.find_files(
new_http_dir, "*"
)
def _find_wheels(self, options: Values, pattern: str) -> List[str]:
wheel_dir = self._cache_dir(options, "wheels")
# The wheel filename format, as specified in PEP 427, is:
# {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
#
# Additionally, non-alphanumeric values in the distribution are
# normalized to underscores (_), meaning hyphens can never occur
# before `-{version}`.
#
# Given that information:
# - If the pattern we're given contains a hyphen (-), the user is
# providing at least the version. Thus, we can just append `*.whl`
# to match the rest of it.
# - If the pattern we're given doesn't contain a hyphen (-), the
# user is only providing the name. Thus, we append `-*.whl` to
# match the hyphen before the version, followed by anything else.
#
# PEP 427: https://www.python.org/dev/peps/pep-0427/
pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
return filesystem.find_files(wheel_dir, pattern)
| CacheCommand |
python | spyder-ide__spyder | spyder/plugins/projects/widgets/projectdialog.py | {
"start": 10517,
"end": 12367
} | class ____(BaseProjectPage):
"""Existing directory project page."""
LOCATION_TEXT = _("Project path")
LOCATION_TIP = _("Select the directory to use for the project")
def get_name(self):
return _("Existing directory")
def get_icon(self):
return self.create_icon("DirClosedIcon")
def setup_page(self):
description = QLabel(
_("Create a Spyder project in an existing directory")
)
description.setWordWrap(True)
description.setFont(self._description_font)
layout = QVBoxLayout()
layout.addWidget(description)
layout.addSpacing(5 * AppStyle.MarginSize)
layout.addWidget(self._location)
layout.addSpacing(7 * AppStyle.MarginSize)
layout.addWidget(self._validation_label)
layout.addStretch()
self.setLayout(layout)
@property
def project_location(self):
return osp.normpath(self._location.textbox.text())
def validate_page(self):
# Clear validation state
self._validation_label.setVisible(False)
self._location.status_action.setVisible(False)
# Avoid using "." as location, which is the result of os.normpath("")
location_text = self._location.textbox.text()
location = osp.normpath(location_text) if location_text else ""
# Perform validation
reasons = self._validate_location(location)
if reasons:
self._validation_label.set_text(
self._compose_failed_validation_text(reasons)
)
self._validation_label.setVisible(True)
return False if reasons else True
# =============================================================================
# ---- Dialog
# =============================================================================
| ExistingDirectoryPage |
python | wandb__wandb | wandb/sdk/artifacts/_generated/input_types.py | {
"start": 1429,
"end": 1718
} | class ____(GQLInput):
entity_name: str = Field(alias="entityName")
old_project_name: str = Field(alias="oldProjectName")
new_project_name: str = Field(alias="newProjectName")
client_mutation_id: Optional[str] = Field(alias="clientMutationId", default=None)
| RenameProjectInput |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 1929,
"end": 2742
} | class ____(ModelOutput):
r"""
last_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
intermediate_activations (`tuple(torch.FloatTensor)`, *optional*):
Intermediate activations that can be used to compute hidden states of the model at various layers.
"""
last_hidden_states: Optional[torch.FloatTensor] = None
intermediate_activations: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for model's outputs that also contains a pooling of the last hidden states as well as intermediate
activations that can be used by the model at later stages.
"""
)
| BaseModelOutputWithIntermediateActivations |
python | kamyu104__LeetCode-Solutions | Python/minimum-adjacent-swaps-to-reach-the-kth-smallest-number.py | {
"start": 39,
"end": 1641
} | class ____(object):
def getMinSwaps(self, num, k):
"""
:type num: str
:type k: int
:rtype: int
"""
def next_permutation(nums, begin, end):
def reverse(nums, begin, end):
left, right = begin, end-1
while left < right:
nums[left], nums[right] = nums[right], nums[left]
left += 1
right -= 1
k, l = begin-1, begin
for i in reversed(xrange(begin, end-1)):
if nums[i] < nums[i+1]:
k = i
break
else:
reverse(nums, begin, end)
return False
for i in reversed(xrange(k+1, end)):
if nums[i] > nums[k]:
l = i
break
nums[k], nums[l] = nums[l], nums[k]
reverse(nums, k+1, end)
return True
new_num = list(num)
while k:
next_permutation(new_num, 0, len(new_num))
k -= 1
result = 0
for i in xrange(len(new_num)):
if new_num[i] == num[i]:
continue
# // greedily move the one with the least cost from new_num to num without missing optimal cost
for j in xrange(i+1, len(new_num)):
if new_num[j] == num[i]:
break
result += j-i
for j in reversed(xrange(i+1, j+1)):
new_num[j], new_num[j-1] = new_num[j-1], new_num[j]
return result
| Solution |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 11098,
"end": 11170
} | class ____(HTTPServerError):
status_code = 500
| HTTPInternalServerError |
python | RaRe-Technologies__gensim | gensim/topic_coherence/indirect_confirmation_measure.py | {
"start": 7495,
"end": 12777
} | class ____:
"""Lazily compute context vectors for topic segments.
Parameters
----------
measure: str
Confirmation measure.
topics: list of numpy.array
Topics.
accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
:class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator from probability_estimation.
gamma: float
Value for computing vectors.
Attributes
----------
sim_cache: dict
Cache similarities between tokens (pairs of word ids), e.g. (1, 2).
context_vector_cache: dict
Mapping from (segment, topic_words) --> context_vector.
Example
-------
.. sourcecode:: pycon
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import indirect_confirmation_measure, text_analysis
>>> import numpy as np
>>>
>>> # create measure, topics
>>> measure = 'nlr'
>>> topics = [np.array([1, 2])]
>>>
>>> # create accumulator
>>> dictionary = Dictionary()
>>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
>>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary)
>>> _ = accumulator.accumulate([['fake', 'tokens'], ['tokens', 'fake']], 5)
>>> cont_vect_comp = indirect_confirmation_measure.ContextVectorComputer(measure, topics, accumulator, 1)
>>> cont_vect_comp.mapping
{1: 0, 2: 1}
>>> cont_vect_comp.vocab_size
2
"""
def __init__(self, measure, topics, accumulator, gamma):
if measure == 'nlr':
self.similarity = _pair_npmi
else:
raise ValueError(
"The direct confirmation measure you entered is not currently supported.")
self.mapping = _map_to_contiguous(topics)
self.vocab_size = len(self.mapping)
self.accumulator = accumulator
self.gamma = gamma
self.sim_cache = {}
self.context_vector_cache = {}
def __getitem__(self, idx):
return self.compute_context_vector(*idx)
def compute_context_vector(self, segment_word_ids, topic_word_ids):
"""Check if (segment_word_ids, topic_word_ids) context vector has been cached.
Parameters
----------
segment_word_ids: list
Ids of words in segment.
topic_word_ids: list
Ids of words in topic.
Returns
-------
csr_matrix :class:`~scipy.sparse.csr`
If context vector has been cached, then return corresponding context vector,
else compute, cache, and return.
"""
key = _key_for_segment(segment_word_ids, topic_word_ids)
context_vector = self.context_vector_cache.get(key, None)
if context_vector is None:
context_vector = self._make_seg(segment_word_ids, topic_word_ids)
self.context_vector_cache[key] = context_vector
return context_vector
def _make_seg(self, segment_word_ids, topic_word_ids):
"""Return context vectors for segmentation (Internal helper function).
Parameters
----------
segment_word_ids : iterable or int
Ids of words in segment.
topic_word_ids : list
Ids of words in topic.
Returns
-------
csr_matrix :class:`~scipy.sparse.csr`
Matrix in Compressed Sparse Row format
"""
context_vector = sps.lil_matrix((self.vocab_size, 1))
if not hasattr(segment_word_ids, '__iter__'):
segment_word_ids = (segment_word_ids,)
for w_j in topic_word_ids:
idx = (self.mapping[w_j], 0)
for pair in (tuple(sorted((w_i, w_j))) for w_i in segment_word_ids):
if pair not in self.sim_cache:
self.sim_cache[pair] = self.similarity(pair, self.accumulator)
context_vector[idx] += self.sim_cache[pair] ** self.gamma
return context_vector.tocsr()
def _pair_npmi(pair, accumulator):
"""Compute normalized pairwise mutual information (**NPMI**) between a pair of words.
Parameters
----------
pair : (int, int)
The pair of words (word_id1, word_id2).
accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator from probability_estimation.
Return
------
float
NPMI between a pair of words.
"""
return log_ratio_measure([[pair]], accumulator, True)[0]
def _cossim(cv1, cv2):
return cv1.T.dot(cv2)[0, 0] / (_magnitude(cv1) * _magnitude(cv2))
def _magnitude(sparse_vec):
return np.sqrt(np.sum(sparse_vec.data ** 2))
def _map_to_contiguous(ids_iterable):
uniq_ids = {}
n = 0
for id_ in itertools.chain.from_iterable(ids_iterable):
if id_ not in uniq_ids:
uniq_ids[id_] = n
n += 1
return uniq_ids
def _key_for_segment(segment, topic_words):
"""A segment may have a single number of an iterable of them."""
segment_key = tuple(segment) if hasattr(segment, '__iter__') else segment
return segment_key, topic_words
| ContextVectorComputer |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 2981,
"end": 3088
} | class ____(JoseError):
error = "expired_token"
description = "The token is expired"
| ExpiredTokenError |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/components/reward_providers/gail_reward_provider.py | {
"start": 2655,
"end": 11405
} | class ____(torch.nn.Module):
gradient_penalty_weight = 10.0
z_size = 128
alpha = 0.0005
mutual_information = 0.5
EPSILON = 1e-7
initial_beta = 0.0
def __init__(self, specs: BehaviorSpec, settings: GAILSettings) -> None:
super().__init__()
self._use_vail = settings.use_vail
self._settings = settings
encoder_settings = settings.network_settings
if encoder_settings.memory is not None:
encoder_settings.memory = None
logger.warning(
"memory was specified in network_settings but is not supported by GAIL. It is being ignored."
)
self._action_flattener = ActionFlattener(specs.action_spec)
unencoded_size = (
self._action_flattener.flattened_size + 1 if settings.use_actions else 0
) # +1 is for dones
self.encoder = NetworkBody(
specs.observation_specs, encoder_settings, unencoded_size
)
estimator_input_size = encoder_settings.hidden_units
if settings.use_vail:
estimator_input_size = self.z_size
self._z_sigma = torch.nn.Parameter(
torch.ones((self.z_size), dtype=torch.float), requires_grad=True
)
self._z_mu_layer = linear_layer(
encoder_settings.hidden_units,
self.z_size,
kernel_init=Initialization.KaimingHeNormal,
kernel_gain=0.1,
)
self._beta = torch.nn.Parameter(
torch.tensor(self.initial_beta, dtype=torch.float), requires_grad=False
)
self._estimator = torch.nn.Sequential(
linear_layer(estimator_input_size, 1, kernel_gain=0.2), torch.nn.Sigmoid()
)
def get_action_input(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Creates the action Tensor. In continuous case, corresponds to the action. In
the discrete case, corresponds to the concatenation of one hot action Tensors.
"""
return self._action_flattener.forward(AgentAction.from_buffer(mini_batch))
def get_state_inputs(self, mini_batch: AgentBuffer) -> List[torch.Tensor]:
"""
Creates the observation input.
"""
n_obs = len(self.encoder.processors)
np_obs = ObsUtil.from_buffer(mini_batch, n_obs)
# Convert to tensors
tensor_obs = [ModelUtils.list_to_tensor(obs) for obs in np_obs]
return tensor_obs
def compute_estimate(
self, mini_batch: AgentBuffer, use_vail_noise: bool = False
) -> torch.Tensor:
"""
Given a mini_batch, computes the estimate (How much the discriminator believes
the data was sampled from the demonstration data).
:param mini_batch: The AgentBuffer of data
:param use_vail_noise: Only when using VAIL : If true, will sample the code, if
false, will return the mean of the code.
"""
inputs = self.get_state_inputs(mini_batch)
if self._settings.use_actions:
actions = self.get_action_input(mini_batch)
dones = torch.as_tensor(
mini_batch[BufferKey.DONE], dtype=torch.float, device=default_device()
).unsqueeze(1)
action_inputs = torch.cat([actions, dones], dim=1)
hidden, _ = self.encoder(inputs, action_inputs)
else:
hidden, _ = self.encoder(inputs)
z_mu: Optional[torch.Tensor] = None
if self._settings.use_vail:
z_mu = self._z_mu_layer(hidden)
hidden = z_mu + torch.randn_like(z_mu) * self._z_sigma * use_vail_noise
estimate = self._estimator(hidden)
return estimate, z_mu
def compute_loss(
self, policy_batch: AgentBuffer, expert_batch: AgentBuffer
) -> torch.Tensor:
"""
Given a policy mini_batch and an expert mini_batch, computes the loss of the discriminator.
"""
total_loss = torch.zeros(1, device=default_device())
stats_dict: Dict[str, np.ndarray] = {}
policy_estimate, policy_mu = self.compute_estimate(
policy_batch, use_vail_noise=True
)
expert_estimate, expert_mu = self.compute_estimate(
expert_batch, use_vail_noise=True
)
stats_dict["Policy/GAIL Policy Estimate"] = policy_estimate.mean().item()
stats_dict["Policy/GAIL Expert Estimate"] = expert_estimate.mean().item()
discriminator_loss = -(
torch.log(expert_estimate + self.EPSILON)
+ torch.log(1.0 - policy_estimate + self.EPSILON)
).mean()
stats_dict["Losses/GAIL Loss"] = discriminator_loss.item()
total_loss += discriminator_loss
if self._settings.use_vail:
# KL divergence loss (encourage latent representation to be normal)
kl_loss = torch.mean(
-torch.sum(
1
+ (self._z_sigma**2).log()
- 0.5 * expert_mu**2
- 0.5 * policy_mu**2
- (self._z_sigma**2),
dim=1,
)
)
vail_loss = self._beta * (kl_loss - self.mutual_information)
with torch.no_grad():
self._beta.data = torch.max(
self._beta + self.alpha * (kl_loss - self.mutual_information),
torch.tensor(0.0),
)
total_loss += vail_loss
stats_dict["Policy/GAIL Beta"] = self._beta.item()
stats_dict["Losses/GAIL KL Loss"] = kl_loss.item()
if self.gradient_penalty_weight > 0.0:
gradient_magnitude_loss = (
self.gradient_penalty_weight
* self.compute_gradient_magnitude(policy_batch, expert_batch)
)
stats_dict["Policy/GAIL Grad Mag Loss"] = gradient_magnitude_loss.item()
total_loss += gradient_magnitude_loss
return total_loss, stats_dict
def compute_gradient_magnitude(
self, policy_batch: AgentBuffer, expert_batch: AgentBuffer
) -> torch.Tensor:
"""
Gradient penalty from https://arxiv.org/pdf/1704.00028. Adds stability esp.
for off-policy. Compute gradients w.r.t randomly interpolated input.
"""
policy_inputs = self.get_state_inputs(policy_batch)
expert_inputs = self.get_state_inputs(expert_batch)
interp_inputs = []
for policy_input, expert_input in zip(policy_inputs, expert_inputs):
obs_epsilon = torch.rand(policy_input.shape, device=policy_input.device)
interp_input = obs_epsilon * policy_input + (1 - obs_epsilon) * expert_input
interp_input.requires_grad = True # For gradient calculation
interp_inputs.append(interp_input)
if self._settings.use_actions:
policy_action = self.get_action_input(policy_batch)
expert_action = self.get_action_input(expert_batch)
action_epsilon = torch.rand(
policy_action.shape, device=policy_action.device
)
policy_dones = torch.as_tensor(
policy_batch[BufferKey.DONE], dtype=torch.float, device=default_device()
).unsqueeze(1)
expert_dones = torch.as_tensor(
expert_batch[BufferKey.DONE], dtype=torch.float, device=default_device()
).unsqueeze(1)
dones_epsilon = torch.rand(policy_dones.shape, device=policy_dones.device)
action_inputs = torch.cat(
[
action_epsilon * policy_action
+ (1 - action_epsilon) * expert_action,
dones_epsilon * policy_dones + (1 - dones_epsilon) * expert_dones,
],
dim=1,
)
action_inputs.requires_grad = True
hidden, _ = self.encoder(interp_inputs, action_inputs)
encoder_input = tuple(interp_inputs + [action_inputs])
else:
hidden, _ = self.encoder(interp_inputs)
encoder_input = tuple(interp_inputs)
if self._settings.use_vail:
use_vail_noise = True
z_mu = self._z_mu_layer(hidden)
hidden = z_mu + torch.randn_like(z_mu) * self._z_sigma * use_vail_noise
estimate = self._estimator(hidden).squeeze(1).sum()
gradient = torch.autograd.grad(estimate, encoder_input, create_graph=True)[0]
# Norm's gradient could be NaN at 0. Use our own safe_norm
safe_norm = (torch.sum(gradient**2, dim=1) + self.EPSILON).sqrt()
gradient_mag = torch.mean((safe_norm - 1) ** 2)
return gradient_mag
| DiscriminatorNetwork |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/map_metric_provider/map_metric_provider.py | {
"start": 3413,
"end": 36089
} | class ____(MetricProvider):
"""The base class for defining metrics that are evaluated for every row. An example of a map metric is
`column_values.null` (which is implemented as a `ColumnMapMetricProvider`, a subclass of `MapMetricProvider`).
""" # noqa: E501 # FIXME CoP
condition_domain_keys: tuple[str, ...] = (
"batch_id",
"table",
"row_condition",
"condition_parser",
)
function_domain_keys: tuple[str, ...] = (
"batch_id",
"table",
"row_condition",
"condition_parser",
)
condition_value_keys: tuple[str, ...] = tuple()
function_value_keys: tuple[str, ...] = tuple()
filter_column_isnull = True
@classmethod
@override
def _register_metric_functions(cls): # noqa: C901, PLR0912, PLR0915 # FIXME CoP
if not (hasattr(cls, "function_metric_name") or hasattr(cls, "condition_metric_name")):
return
for attr, candidate_metric_fn in inspect.getmembers(cls):
if not hasattr(candidate_metric_fn, "metric_engine"):
# This is not a metric.
continue
metric_fn_type = getattr(candidate_metric_fn, "metric_fn_type", None)
if not metric_fn_type:
# This is not a metric (valid metrics possess exectly one metric function).
return
engine = candidate_metric_fn.metric_engine
if not issubclass(engine, ExecutionEngine):
raise ValueError( # noqa: TRY003, TRY004 # FIXME CoP
"Metric functions must be defined with an ExecutionEngine as part of registration." # noqa: E501 # FIXME CoP
)
if metric_fn_type in [
MetricPartialFunctionTypes.MAP_CONDITION_FN,
MetricPartialFunctionTypes.MAP_CONDITION_SERIES,
MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
]:
if not hasattr(cls, "condition_metric_name"):
raise ValueError( # noqa: TRY003 # FIXME CoP
"""A "MapMetricProvider" must have a "condition_metric_name" to have a decorated \
"column_condition_partial" method.""" # noqa: E501 # FIXME CoP
)
condition_provider = candidate_metric_fn
# noinspection PyUnresolvedReferences
metric_name = cls.condition_metric_name
metric_domain_keys = cls.condition_domain_keys
metric_value_keys = cls.condition_value_keys
metric_definition_kwargs = getattr(
condition_provider, "metric_definition_kwargs", {}
)
domain_type = getattr(
condition_provider,
"domain_type",
metric_definition_kwargs.get("domain_type", MetricDomainTypes.TABLE),
)
if issubclass(engine, PandasExecutionEngine):
register_metric(
metric_name=f"{metric_name}.{metric_fn_type.metric_suffix}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=condition_provider,
metric_fn_type=metric_fn_type,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_map_condition_unexpected_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_map_condition_index,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_map_condition_query,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_map_condition_rows,
metric_fn_type=MetricFunctionTypes.VALUE,
)
if domain_type == MetricDomainTypes.COLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_column_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUE_COUNTS.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_column_map_condition_value_counts,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_column_pair_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_column_pair_map_condition_filtered_row_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif domain_type == MetricDomainTypes.MULTICOLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_multicolumn_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_pandas_multicolumn_map_condition_filtered_row_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif issubclass(engine, SqlAlchemyExecutionEngine):
register_metric(
metric_name=f"{metric_name}.{metric_fn_type.metric_suffix}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=condition_provider,
metric_fn_type=metric_fn_type,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_map_condition_rows,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_map_condition_index,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_map_condition_query,
metric_fn_type=MetricFunctionTypes.VALUE,
)
if metric_fn_type == MetricPartialFunctionTypes.MAP_CONDITION_FN:
# Documentation in "MetricProvider._register_metric_functions()" explains registration protocol. # noqa: E501 # FIXME CoP
if domain_type == MetricDomainTypes.COLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}.{MetricPartialFunctionTypes.AGGREGATE_FN.metric_suffix}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_map_condition_unexpected_count_aggregate_fn,
metric_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=None,
metric_fn_type=MetricFunctionTypes.VALUE,
)
else:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_map_condition_unexpected_count_value,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif metric_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_map_condition_unexpected_count_value,
metric_fn_type=MetricFunctionTypes.VALUE,
)
if domain_type == MetricDomainTypes.COLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_column_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUE_COUNTS.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_column_map_condition_value_counts,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_column_pair_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_column_pair_map_condition_filtered_row_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif domain_type == MetricDomainTypes.MULTICOLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_multicolumn_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_sqlalchemy_multicolumn_map_condition_filtered_row_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif issubclass(engine, SparkDFExecutionEngine):
register_metric(
metric_name=f"{metric_name}.{metric_fn_type.metric_suffix}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=condition_provider,
metric_fn_type=metric_fn_type,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_map_condition_rows,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_map_condition_index,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_map_condition_query,
metric_fn_type=MetricFunctionTypes.VALUE,
)
if metric_fn_type == MetricPartialFunctionTypes.MAP_CONDITION_FN:
# Documentation in "MetricProvider._register_metric_functions()" explains registration protocol. # noqa: E501 # FIXME CoP
if domain_type == MetricDomainTypes.COLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}.{MetricPartialFunctionTypes.AGGREGATE_FN.metric_suffix}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_map_condition_unexpected_count_aggregate_fn,
metric_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=None,
metric_fn_type=MetricFunctionTypes.VALUE,
)
else:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_map_condition_unexpected_count_value,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif metric_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_map_condition_unexpected_count_value,
metric_fn_type=MetricFunctionTypes.VALUE,
)
if domain_type == MetricDomainTypes.COLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_column_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUE_COUNTS.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_column_map_condition_value_counts,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_column_pair_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_column_pair_map_condition_filtered_row_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif domain_type == MetricDomainTypes.MULTICOLUMN:
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_multicolumn_map_condition_values,
metric_fn_type=MetricFunctionTypes.VALUE,
)
register_metric(
metric_name=f"{metric_name}.{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=(*metric_value_keys, "result_format"),
execution_engine=engine,
metric_class=cls,
metric_provider=_spark_multicolumn_map_condition_filtered_row_count,
metric_fn_type=MetricFunctionTypes.VALUE,
)
elif metric_fn_type in [
MetricPartialFunctionTypes.MAP_FN,
MetricPartialFunctionTypes.MAP_SERIES,
MetricPartialFunctionTypes.WINDOW_FN,
]:
if not hasattr(cls, "function_metric_name"):
raise ValueError( # noqa: TRY003 # FIXME CoP
"""A "MapMetricProvider" must have a "function_metric_name" to have a decorated \
"column_function_partial" method.""" # noqa: E501 # FIXME CoP
)
map_function_provider = candidate_metric_fn
# noinspection PyUnresolvedReferences
metric_name = cls.function_metric_name
metric_domain_keys = cls.function_domain_keys
metric_value_keys = cls.function_value_keys
register_metric(
metric_name=f"{metric_name}.{metric_fn_type.metric_suffix}",
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=map_function_provider,
metric_fn_type=metric_fn_type,
)
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: ExpectationConfiguration | None = None,
execution_engine: ExecutionEngine | None = None,
runtime_configuration: dict | None = None,
):
dependencies: dict[str, MetricConfiguration] = {}
base_metric_value_kwargs = {
k: v for k, v in metric.metric_value_kwargs.items() if k != "result_format"
}
metric_name: str = metric.metric_name
metric_suffix: str = f".{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
# Documentation in "MetricProvider._register_metric_functions()" explains registration/dependency protocol. # noqa: E501 # FIXME CoP
if metric_name.endswith(metric_suffix):
has_aggregate_fn: bool = False
if execution_engine is not None:
try:
_ = get_metric_provider(
f"{metric_name}.{MetricPartialFunctionTypes.AGGREGATE_FN.metric_suffix}",
execution_engine,
)
has_aggregate_fn = True
except gx_exceptions.MetricProviderError:
pass
if has_aggregate_fn:
dependencies["metric_partial_fn"] = MetricConfiguration(
metric_name=f"{metric_name}.{MetricPartialFunctionTypes.AGGREGATE_FN.metric_suffix}",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=base_metric_value_kwargs,
)
else:
dependencies["unexpected_condition"] = MetricConfiguration(
metric_name=(
f"{metric_name[: -len(metric_suffix)]}."
f"{MetricPartialFunctionTypeSuffixes.CONDITION.value}"
),
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=base_metric_value_kwargs,
)
# MapMetric uses "condition" metric to build "unexpected_count.aggregate_fn" and other listed metrics as well. # noqa: E501 # FIXME CoP
unexpected_condition_dependent_metric_name_suffixes: list[str] = list(
filter(
lambda element: metric_name.endswith(element),
[
f".{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}.{MetricPartialFunctionTypes.AGGREGATE_FN.metric_suffix}",
f".{SummarizationMetricNameSuffixes.UNEXPECTED_VALUE_COUNTS.value}",
f".{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
f".{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
f".{SummarizationMetricNameSuffixes.FILTERED_ROW_COUNT.value}",
f".{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
f".{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
],
)
)
if len(unexpected_condition_dependent_metric_name_suffixes) == 1:
metric_suffix = unexpected_condition_dependent_metric_name_suffixes[0]
if metric_name.endswith(metric_suffix):
dependencies["unexpected_condition"] = MetricConfiguration(
metric_name=f"{metric_name[: -len(metric_suffix)]}."
f"{MetricPartialFunctionTypeSuffixes.CONDITION.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=base_metric_value_kwargs,
)
return dependencies
@staticmethod
def is_sqlalchemy_metric_selectable(
map_metric_provider: MetaMetricProvider,
) -> bool:
# deprecated-v0.16.1
warnings.warn(
"MapMetricProvider.is_sqlalchemy_metric_selectable is deprecated."
"You can use the great_expectations.expectations.metrics.map_metric_provider.is_sqlalchemy_metric_selectable._is_sqlalchemy_metric_selectable function, but please note that it is not considered part of the public API, and could change in the future.", # noqa: E501 # FIXME CoP
DeprecationWarning,
)
return _is_sqlalchemy_metric_selectable(map_metric_provider)
| MapMetricProvider |
python | huggingface__transformers | tests/pipelines/test_pipelines_feature_extraction.py | {
"start": 991,
"end": 12200
} | class ____(unittest.TestCase):
model_mapping = MODEL_MAPPING
@require_torch
def test_small_model_pt(self):
feature_extractor = pipeline(task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert")
outputs = feature_extractor("This is a test")
self.assertEqual(
nested_simplify(outputs),
[[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 
0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip
@require_torch
def test_tokenization_small_model_pt(self):
feature_extractor = pipeline(task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert")
# test with empty parameters
outputs = feature_extractor("This is a test")
self.assertEqual(
nested_simplify(outputs),
[[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 
0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip
# test with various tokenizer parameters
tokenize_kwargs = {"max_length": 3}
outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs)
self.assertEqual(np.squeeze(outputs).shape, (3, 32))
tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4}
outputs = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
tokenize_kwargs=tokenize_kwargs,
)
self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
tokenize_kwargs = {"padding": True, "max_length": 4}
outputs = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
truncation=True,
tokenize_kwargs=tokenize_kwargs,
)
self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
# raise value error if truncation parameter given for two places
tokenize_kwargs = {"truncation": True}
with self.assertRaises(ValueError):
_ = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
truncation=True,
tokenize_kwargs=tokenize_kwargs,
)
@require_torch
def test_return_tensors_pt(self):
feature_extractor = pipeline(task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert")
outputs = feature_extractor("This is a test", return_tensors=True)
self.assertTrue(torch.is_tensor(outputs))
def get_shape(self, input_, shape=None):
if shape is None:
shape = []
if isinstance(input_, list):
subshapes = [self.get_shape(in_, shape) for in_ in input_]
if all(s == 0 for s in subshapes):
shape.append(len(input_))
else:
subshape = subshapes[0]
shape = [len(input_), *subshape]
elif isinstance(input_, float):
return 0
else:
raise TypeError("We expect lists of floats, nothing else")
return shape
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
if tokenizer is None:
self.skipTest(reason="No tokenizer")
elif (
type(model.config) in FEATURE_EXTRACTOR_MAPPING
or isinstance(model.config, LxmertConfig)
or type(model.config) in IMAGE_PROCESSOR_MAPPING
):
self.skipTest(
reason="This is a bimodal model, we need to find a more consistent way to switch on those models."
)
elif model.config.is_encoder_decoder:
self.skipTest(
"""encoder_decoder models are trickier for this pipeline.
Do we want encoder + decoder inputs to get some features?
Do we want encoder only features ?
For now ignore those.
"""
)
feature_extractor_pipeline = FeatureExtractionPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
return feature_extractor_pipeline, ["This is a test", "This is another test"]
def run_pipeline_test(self, feature_extractor, examples):
outputs = feature_extractor("This is a test")
shape = self.get_shape(outputs)
self.assertEqual(shape[0], 1)
# If we send too small input
# there's a bug within FunnelModel (output with shape [1, 4, 2, 1] doesn't match the broadcast shape [1, 4, 2, 2])
outputs = feature_extractor(["This is a test", "Another longer test"])
shape = self.get_shape(outputs)
self.assertEqual(shape[0], 2)
outputs = feature_extractor("This is a test" * 100, truncation=True)
shape = self.get_shape(outputs)
self.assertEqual(shape[0], 1)
| FeatureExtractionPipelineTests |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 37261,
"end": 41069
} | class ____(ObjectBaseModel):
"""An ORM representation of deployment data."""
name: Name = Field(default=..., description="The name of the deployment.")
version: Optional[str] = Field(
default=None, description="An optional version for the deployment."
)
version_id: Optional[UUID] = Field(
default=None, description="The ID of the current version of the deployment."
)
version_info: Optional[VersionInfo] = Field(
default=None, description="A description of this version of the deployment."
)
description: Optional[str] = Field(
default=None, description="A description for the deployment."
)
flow_id: UUID = Field(
default=..., description="The flow id associated with the deployment."
)
paused: bool = Field(
default=False, description="Whether or not the deployment is paused."
)
concurrency_limit: Optional[int] = Field(
default=None, description="The concurrency limit for the deployment."
)
schedules: list[DeploymentSchedule] = Field(
default_factory=lambda: [],
description="A list of schedules for the deployment.",
)
job_variables: dict[str, Any] = Field(
default_factory=dict,
description="Overrides to apply to flow run infrastructure at runtime.",
)
parameters: dict[str, Any] = Field(
default_factory=dict,
description="Parameters for flow runs scheduled by the deployment.",
)
pull_steps: Optional[list[dict[str, Any]]] = Field(
default=None,
description="Pull steps for cloning and running this deployment.",
)
tags: list[str] = Field(
default_factory=list,
description="A list of tags for the deployment",
examples=[["tag-1", "tag-2"]],
)
labels: KeyValueLabelsField
work_queue_name: Optional[str] = Field(
default=None,
description=(
"The work queue for the deployment. If no work queue is set, work will not"
" be scheduled."
),
)
last_polled: Optional[DateTime] = Field(
default=None,
description="The last time the deployment was polled for status updates.",
)
parameter_openapi_schema: Optional[dict[str, Any]] = Field(
default=None,
description="The parameter schema of the flow, including defaults.",
)
path: Optional[str] = Field(
default=None,
description=(
"The path to the working directory for the workflow, relative to remote"
" storage or an absolute path."
),
)
entrypoint: Optional[str] = Field(
default=None,
description=(
"The path to the entrypoint for the workflow, relative to the `path`."
),
)
storage_document_id: Optional[UUID] = Field(
default=None,
description="The block document defining storage used for this flow.",
)
infrastructure_document_id: Optional[UUID] = Field(
default=None,
description="The block document defining infrastructure to use for flow runs.",
)
created_by: Optional[CreatedBy] = Field(
default=None,
description="Optional information about the creator of this deployment.",
)
updated_by: Optional[UpdatedBy] = Field(
default=None,
description="Optional information about the updater of this deployment.",
)
work_queue_id: Optional[UUID] = Field(
default=None,
description=(
"The id of the work pool queue to which this deployment is assigned."
),
)
enforce_parameter_schema: bool = Field(
default=True,
description=(
"Whether or not the deployment should enforce the parameter schema."
),
)
| Deployment |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/io.py | {
"start": 3872,
"end": 11342
} | class ____(IR):
"""
Input from a split file.
This class wraps a single-file `Scan` object. At
IO/evaluation time, this class will only perform
a partial read of the underlying file. The range
(skip_rows and n_rows) is calculated at IO time.
"""
__slots__ = (
"base_scan",
"parquet_options",
"schema",
"split_index",
"total_splits",
)
_non_child = (
"schema",
"base_scan",
"split_index",
"total_splits",
"parquet_options",
)
base_scan: Scan
"""Scan operation this node is based on."""
split_index: int
"""Index of the current split."""
total_splits: int
"""Total number of splits."""
parquet_options: ParquetOptions
"""Parquet-specific options."""
def __init__(
self,
schema: Schema,
base_scan: Scan,
split_index: int,
total_splits: int,
parquet_options: ParquetOptions,
):
self.schema = schema
self.base_scan = base_scan
self.split_index = split_index
self.total_splits = total_splits
self._non_child_args = (
split_index,
total_splits,
*base_scan._non_child_args,
)
self.parquet_options = parquet_options
self.children = ()
if base_scan.typ not in ("parquet",): # pragma: no cover
raise NotImplementedError(
f"Unhandled Scan type for file splitting: {base_scan.typ}"
)
@classmethod
def do_evaluate(
cls,
split_index: int,
total_splits: int,
schema: Schema,
typ: str,
reader_options: dict[str, Any],
paths: list[str],
with_columns: list[str] | None,
skip_rows: int,
n_rows: int,
row_index: tuple[str, int] | None,
include_file_paths: str | None,
predicate: NamedExpr | None,
parquet_options: ParquetOptions,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
if typ not in ("parquet",): # pragma: no cover
raise NotImplementedError(f"Unhandled Scan type for file splitting: {typ}")
if len(paths) > 1: # pragma: no cover
raise ValueError(f"Expected a single path, got: {paths}")
# Parquet logic:
# - We are one of "total_splits" SplitScan nodes
# assigned to the same file.
# - We know our index within this file ("split_index")
# - We can also use parquet metadata to query the
# total number of rows in each row-group of the file.
# - We can use all this information to calculate the
# "skip_rows" and "n_rows" options to use locally.
rowgroup_metadata = plc.io.parquet_metadata.read_parquet_metadata(
plc.io.SourceInfo(paths)
).rowgroup_metadata()
total_row_groups = len(rowgroup_metadata)
if total_splits <= total_row_groups:
# We have enough row-groups in the file to align
# all "total_splits" of our reads with row-group
# boundaries. Calculate which row-groups to include
# in the current read, and use metadata to translate
# the row-group indices to "skip_rows" and "n_rows".
rg_stride = total_row_groups // total_splits
skip_rgs = rg_stride * split_index
skip_rows = sum(rg["num_rows"] for rg in rowgroup_metadata[:skip_rgs])
n_rows = sum(
rg["num_rows"]
for rg in rowgroup_metadata[skip_rgs : skip_rgs + rg_stride]
)
else:
# There are not enough row-groups to align
# all "total_splits" of our reads with row-group
# boundaries. Use metadata to directly calculate
# "skip_rows" and "n_rows" for the current read.
total_rows = sum(rg["num_rows"] for rg in rowgroup_metadata)
n_rows = total_rows // total_splits
skip_rows = n_rows * split_index
# Last split should always read to end of file
if split_index == (total_splits - 1):
n_rows = -1
# Perform the partial read
return Scan.do_evaluate(
schema,
typ,
reader_options,
paths,
with_columns,
skip_rows,
n_rows,
row_index,
include_file_paths,
predicate,
parquet_options,
context=context,
)
@lower_ir_node.register(Empty)
def _(
ir: Empty, rec: LowerIRTransformer
) -> tuple[IR, MutableMapping[IR, PartitionInfo]]:
return ir, {ir: PartitionInfo(count=1)} # pragma: no cover
@lower_ir_node.register(Scan)
def _(
ir: Scan, rec: LowerIRTransformer
) -> tuple[IR, MutableMapping[IR, PartitionInfo]]:
partition_info: MutableMapping[IR, PartitionInfo]
config_options = rec.state["config_options"]
if (
ir.typ in ("csv", "parquet", "ndjson")
and ir.n_rows == -1
and ir.skip_rows == 0
and ir.row_index is None
):
plan = scan_partition_plan(ir, rec.state["stats"], config_options)
paths = list(ir.paths)
if plan.flavor == IOPartitionFlavor.SPLIT_FILES:
# Disable chunked reader when splitting files
parquet_options = dataclasses.replace(
config_options.parquet_options,
chunked=False,
)
slices: list[SplitScan] = []
for path in paths:
base_scan = Scan(
ir.schema,
ir.typ,
ir.reader_options,
ir.cloud_options,
[path],
ir.with_columns,
ir.skip_rows,
ir.n_rows,
ir.row_index,
ir.include_file_paths,
ir.predicate,
parquet_options,
)
slices.extend(
SplitScan(
ir.schema, base_scan, sindex, plan.factor, parquet_options
)
for sindex in range(plan.factor)
)
new_node = Union(ir.schema, None, *slices)
partition_info = {slice: PartitionInfo(count=1) for slice in slices} | {
new_node: PartitionInfo(count=len(slices))
}
else:
groups: list[Scan] = [
Scan(
ir.schema,
ir.typ,
ir.reader_options,
ir.cloud_options,
paths[i : i + plan.factor],
ir.with_columns,
ir.skip_rows,
ir.n_rows,
ir.row_index,
ir.include_file_paths,
ir.predicate,
config_options.parquet_options,
)
for i in range(0, len(paths), plan.factor)
]
new_node = Union(ir.schema, None, *groups)
partition_info = {group: PartitionInfo(count=1) for group in groups} | {
new_node: PartitionInfo(count=len(groups))
}
return new_node, partition_info
return ir, {ir: PartitionInfo(count=1)} # pragma: no cover
| SplitScan |
python | modin-project__modin | modin/numpy/indexing.py | {
"start": 8056,
"end": 21312
} | class ____(object):
"""
An indexer for modin_arr.__{get|set}item__ functionality.
Parameters
----------
array : modin.numpy.array
Array to operate on.
"""
def __init__(self, array):
self.arr = array
def _get_numpy_object_from_qc_view(
self,
qc_view,
row_scalar: bool,
col_scalar: bool,
ndim: int,
):
"""
Convert the query compiler view to the appropriate NumPy object.
Parameters
----------
qc_view : BaseQueryCompiler
Query compiler to convert.
row_scalar : bool
Whether indexer for rows is scalar.
col_scalar : bool
Whether indexer for columns is scalar.
ndim : {0, 1, 2}
Number of dimensions in dataset to be retrieved.
Returns
-------
modin.numpy.array
The array object with the data from the query compiler view.
Notes
-----
Usage of `slice(None)` as a lookup is a hack to pass information about
full-axis grab without computing actual indices that triggers lazy computations.
Ideally, this API should get rid of using slices as indexers and either use a
common ``Indexer`` object or range and ``np.ndarray`` only.
"""
if ndim == 2:
return array(_query_compiler=qc_view, _ndim=self.arr._ndim)
if self.arr._ndim == 1 and not row_scalar:
return array(_query_compiler=qc_view, _ndim=1)
if self.arr._ndim == 1:
_ndim = 0
elif ndim == 0:
_ndim = 0
else:
# We are in the case where ndim == 1
# The axis we squeeze on depends on whether we are looking for an exact
# value or a subset of rows and columns. Knowing if we have a full MultiIndex
# lookup or scalar lookup can help us figure out whether we need to squeeze
# on the row or column index.
if row_scalar and col_scalar:
_ndim = 0
elif not any([row_scalar, col_scalar]):
_ndim = 2
else:
_ndim = 1
if row_scalar:
qc_view = qc_view.transpose()
if _ndim == 0:
return qc_view.to_numpy()[0, 0]
res_arr = array(_query_compiler=qc_view, _ndim=_ndim)
return res_arr
def _parse_row_and_column_locators(self, tup):
"""
Unpack the user input for getitem and setitem and compute ndim.
loc[a] -> ([a], :), 1D
loc[[a,b]] -> ([a,b], :),
loc[a,b] -> ([a], [b]), 0D
Parameters
----------
tup : tuple
User input to unpack.
Returns
-------
row_loc : scalar or list
Row locator(s) as a scalar or List.
col_list : scalar or list
Column locator(s) as a scalar or List.
ndim : {0, 1, 2}
Number of dimensions of located dataset.
"""
row_loc, col_loc = slice(None), slice(None)
if is_tuple(tup):
row_loc = tup[0]
if len(tup) == 2:
col_loc = tup[1]
if len(tup) > 2:
raise IndexingError("Too many indexers")
else:
row_loc = tup
row_loc = row_loc(self.arr) if callable(row_loc) else row_loc
col_loc = col_loc(self.arr) if callable(col_loc) else col_loc
row_loc = row_loc._to_numpy() if isinstance(row_loc, array) else row_loc
col_loc = col_loc._to_numpy() if isinstance(col_loc, array) else col_loc
return row_loc, col_loc, _compute_ndim(row_loc, col_loc)
def __getitem__(self, key):
"""
Retrieve dataset according to `key`.
Parameters
----------
key : callable or tuple
The global row numbers to retrieve data from.
Returns
-------
DataFrame or Series
Located dataset.
See Also
--------
pandas.DataFrame.iloc
"""
row_loc, col_loc, ndim = self._parse_row_and_column_locators(key)
row_scalar = is_scalar(row_loc)
col_scalar = is_scalar(col_loc)
self._check_dtypes(row_loc)
self._check_dtypes(col_loc)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
if isinstance(row_lookup, slice):
ErrorMessage.catch_bugs_and_request_email(
failure_condition=row_lookup != slice(None),
extra_log=f"Only None-slices are acceptable as a slice argument in masking, got: {row_lookup}",
)
row_lookup = None
if isinstance(col_lookup, slice):
ErrorMessage.catch_bugs_and_request_email(
failure_condition=col_lookup != slice(None),
extra_log=f"Only None-slices are acceptable as a slice argument in masking, got: {col_lookup}",
)
col_lookup = None
qc_view = self.arr._query_compiler.take_2d_positional(row_lookup, col_lookup)
result = self._get_numpy_object_from_qc_view(
qc_view,
row_scalar=row_scalar,
col_scalar=col_scalar,
ndim=ndim,
)
return result
def _determine_setitem_axis(self, row_lookup, col_lookup, row_scalar, col_scalar):
"""
Determine an axis along which we should do an assignment.
Parameters
----------
row_lookup : slice or list
Indexer for rows.
col_lookup : slice or list
Indexer for columns.
row_scalar : bool
Whether indexer for rows is scalar or not.
col_scalar : bool
Whether indexer for columns is scalar or not.
Returns
-------
int or None
None if this will be a both axis assignment, number of axis to assign in other cases.
Notes
-----
axis = 0: column assignment df[col] = item
axis = 1: row assignment df.loc[row] = item
axis = None: assignment along both axes
"""
if self.arr.shape == (1, 1):
return None if not (row_scalar ^ col_scalar) else 1 if row_scalar else 0
def get_axis(axis):
return (
self.arr._query_compiler.index
if axis == 0
else self.arr._query_compiler.columns
)
row_lookup_len, col_lookup_len = [
(
len(lookup)
if not isinstance(lookup, slice)
else compute_sliced_len(lookup, len(get_axis(i)))
)
for i, lookup in enumerate([row_lookup, col_lookup])
]
if col_lookup_len == 1 and row_lookup_len == 1:
axis = None
elif (
row_lookup_len == len(self.arr._query_compiler.index)
and col_lookup_len == 1
and self.arr._ndim == 2
):
axis = 0
elif (
col_lookup_len == len(self.arr._query_compiler.columns)
and row_lookup_len == 1
):
axis = 1
else:
axis = None
return axis
def _setitem_positional(self, row_lookup, col_lookup, item, axis=None):
"""
Assign `item` value to located dataset.
Parameters
----------
row_lookup : slice or scalar
The global row index to write item to.
col_lookup : slice or scalar
The global col index to write item to.
item : DataFrame, Series or scalar
The new item needs to be set. It can be any shape that's
broadcast-able to the product of the lookup tables.
axis : {None, 0, 1}, default: None
If not None, it means that whole axis is used to assign a value.
0 means assign to whole column, 1 means assign to whole row.
If None, it means that partial assignment is done on both axes.
"""
# Convert slices to indices for the purposes of application.
# TODO (devin-petersohn): Apply to slice without conversion to list
if isinstance(row_lookup, slice):
row_lookup = range(len(self.arr._query_compiler.index))[row_lookup]
if isinstance(col_lookup, slice):
col_lookup = range(len(self.arr._query_compiler.columns))[col_lookup]
new_qc = self.arr._query_compiler.write_items(row_lookup, col_lookup, item)
self.arr._update_inplace(new_qc)
def __setitem__(self, key, item):
"""
Assign `item` value to dataset located by `key`.
Parameters
----------
key : callable or tuple
The global row numbers to assign data to.
item : modin.pandas.DataFrame, modin.pandas.Series or scalar
Value that should be assigned to located dataset.
See Also
--------
pandas.DataFrame.iloc
"""
row_loc, col_loc, _ = self._parse_row_and_column_locators(key)
row_scalar = is_scalar(row_loc)
col_scalar = is_scalar(col_loc)
self._check_dtypes(row_loc)
self._check_dtypes(col_loc)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
self._setitem_positional(
row_lookup,
col_lookup,
item,
axis=self._determine_setitem_axis(
row_lookup, col_lookup, row_scalar, col_scalar
),
)
def _compute_lookup(self, row_loc, col_loc):
"""
Compute index and column labels from index and column integer locators.
Parameters
----------
row_loc : slice, list, array or tuple
Row locator.
col_loc : slice, list, array or tuple
Columns locator.
Returns
-------
row_lookup : slice(None) if full axis grab, pandas.RangeIndex if repetition is detected, numpy.ndarray otherwise
List of index labels.
col_lookup : slice(None) if full axis grab, pandas.RangeIndex if repetition is detected, numpy.ndarray otherwise
List of columns labels.
Notes
-----
Usage of `slice(None)` as a resulting lookup is a hack to pass information about
full-axis grab without computing actual indices that triggers lazy computations.
Ideally, this API should get rid of using slices as indexers and either use a
common ``Indexer`` object or range and ``np.ndarray`` only.
"""
lookups = []
for axis, axis_loc in enumerate((row_loc, col_loc)):
if is_scalar(axis_loc):
axis_loc = np.array([axis_loc])
if isinstance(axis_loc, slice):
axis_lookup = (
axis_loc
if axis_loc == slice(None)
else pandas.RangeIndex(
*axis_loc.indices(len(self.arr._query_compiler.get_axis(axis)))
)
)
elif is_range_like(axis_loc):
axis_lookup = pandas.RangeIndex(
axis_loc.start, axis_loc.stop, axis_loc.step
)
elif is_boolean_array(axis_loc):
axis_lookup = boolean_mask_to_numeric(axis_loc)
else:
if isinstance(axis_loc, pandas.Index):
axis_loc = axis_loc.values
elif is_list_like(axis_loc) and not isinstance(axis_loc, np.ndarray):
# `Index.__getitem__` works much faster with numpy arrays than with python lists,
# so although we lose some time here on converting to numpy, `Index.__getitem__`
# speedup covers the loss that we gain here.
axis_loc = np.array(axis_loc, dtype=np.int64)
# Relatively fast check allows us to not trigger `self.qc.get_axis()` computation
# if there're no negative indices and so they don't not depend on the axis length.
if isinstance(axis_loc, np.ndarray) and not (axis_loc < 0).any():
axis_lookup = axis_loc
else:
axis_lookup = pandas.RangeIndex(
len(self.arr._query_compiler.get_axis(axis))
)[axis_loc]
if isinstance(axis_lookup, pandas.Index) and not is_range_like(axis_lookup):
axis_lookup = axis_lookup.values
lookups.append(axis_lookup)
return lookups
def _check_dtypes(self, locator):
"""
Check that `locator` is an integer scalar, integer slice, integer list or array of booleans.
Parameters
----------
locator : scalar, list, slice or array
Object to check.
Raises
------
ValueError
If check fails.
"""
is_int = is_integer(locator)
is_int_slice = is_integer_slice(locator)
is_int_arr = is_integer_array(locator)
is_bool_arr = is_boolean_array(locator)
if not any([is_int, is_int_slice, is_int_arr, is_bool_arr]):
raise ValueError(_ILOC_INT_ONLY_ERROR)
| ArrayIndexer |
python | mahmoud__glom | glom/reduction.py | {
"start": 356,
"end": 508
} | class ____(GlomError):
"""Error raised when Fold() is called on non-iterable
targets, and possibly other uses in the future."""
pass
| FoldError |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 52344,
"end": 56417
} | class ____(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.embed_dim = config.conv_dim
self.self_attn = OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
n_levels=3,
n_points=4,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = nn.functional.relu
self.activation_dropout = config.dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim)
self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.is_training = config.is_training
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
level_start_index=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.is_training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Modified from from transformers.models.detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetrEncoder->OneFormerPixelDecoderEncoderOnly
| OneFormerPixelDecoderEncoderLayer |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 260735,
"end": 262232
} | class ____(TypedDict, closed=True, total=False): # type: ignore[call-arg]
"""
:class:`altair.StyleConfigIndex` ``TypedDict`` wrapper.
Parameters
----------
arc
Arc-specific Config
area
Area-Specific Config
bar
Bar-Specific Config
circle
Circle-Specific Config
geoshape
Geoshape-Specific Config
image
Image-specific Config
line
Line-Specific Config
mark
Mark Config
point
Point-Specific Config
rect
Rect-Specific Config
rule
Rule-Specific Config
square
Square-Specific Config
text
Text-Specific Config
tick
Tick-Specific Config
trail
Trail-Specific Config
Notes
-----
The following keys may be specified as string literals **only**:
['group-subtitle', 'group-title', 'guide-label', 'guide-title']
See `PEP728`_ for type checker compatibility.
.. _PEP728:
https://peps.python.org/pep-0728/#reference-implementation
"""
arc: RectConfigKwds
area: AreaConfigKwds
bar: BarConfigKwds
circle: MarkConfigKwds
geoshape: MarkConfigKwds
image: RectConfigKwds
line: LineConfigKwds
mark: MarkConfigKwds
point: MarkConfigKwds
rect: RectConfigKwds
rule: MarkConfigKwds
square: MarkConfigKwds
text: MarkConfigKwds
tick: TickConfigKwds
trail: LineConfigKwds
__extra_items__: MarkConfigKwds
| StyleConfigIndexKwds |
python | simonw__datasette | datasette/views/row.py | {
"start": 486,
"end": 5528
} | class ____(DataView):
name = "row"
async def data(self, request, default_labels=False):
resolved = await self.ds.resolve_row(request)
db = resolved.db
database = db.name
table = resolved.table
pk_values = resolved.pk_values
# Ensure user has permission to view this row
visible, private = await self.ds.check_visibility(
request.actor,
action="view-table",
resource=TableResource(database=database, table=table),
)
if not visible:
raise Forbidden("You do not have permission to view this table")
results = await resolved.db.execute(
resolved.sql, resolved.params, truncate=True
)
columns = [r[0] for r in results.description]
rows = list(results.rows)
if not rows:
raise NotFound(f"Record not found: {pk_values}")
async def template_data():
display_columns, display_rows = await display_columns_and_rows(
self.ds,
database,
table,
results.description,
rows,
link_column=False,
truncate_cells=0,
request=request,
)
for column in display_columns:
column["sortable"] = False
row_actions = []
for hook in pm.hook.row_actions(
datasette=self.ds,
actor=request.actor,
request=request,
database=database,
table=table,
row=rows[0],
):
extra_links = await await_me_maybe(hook)
if extra_links:
row_actions.extend(extra_links)
return {
"private": private,
"foreign_key_tables": await self.foreign_key_tables(
database, table, pk_values
),
"database_color": db.color,
"display_columns": display_columns,
"display_rows": display_rows,
"custom_table_templates": [
f"_table-{to_css_class(database)}-{to_css_class(table)}.html",
f"_table-row-{to_css_class(database)}-{to_css_class(table)}.html",
"_table.html",
],
"row_actions": row_actions,
"top_row": make_slot_function(
"top_row",
self.ds,
request,
database=resolved.db.name,
table=resolved.table,
row=rows[0],
),
"metadata": {},
}
data = {
"database": database,
"table": table,
"rows": rows,
"columns": columns,
"primary_keys": resolved.pks,
"primary_key_values": pk_values,
}
if "foreign_key_tables" in (request.args.get("_extras") or "").split(","):
data["foreign_key_tables"] = await self.foreign_key_tables(
database, table, pk_values
)
return (
data,
template_data,
(
f"row-{to_css_class(database)}-{to_css_class(table)}.html",
"row.html",
),
)
async def foreign_key_tables(self, database, table, pk_values):
if len(pk_values) != 1:
return []
db = self.ds.databases[database]
all_foreign_keys = await db.get_all_foreign_keys()
foreign_keys = all_foreign_keys[table]["incoming"]
if len(foreign_keys) == 0:
return []
sql = "select " + ", ".join(
[
"(select count(*) from {table} where {column}=:id)".format(
table=escape_sqlite(fk["other_table"]),
column=escape_sqlite(fk["other_column"]),
)
for fk in foreign_keys
]
)
try:
rows = list(await db.execute(sql, {"id": pk_values[0]}))
except QueryInterrupted:
# Almost certainly hit the timeout
return []
foreign_table_counts = dict(
zip(
[(fk["other_table"], fk["other_column"]) for fk in foreign_keys],
list(rows[0]),
)
)
foreign_key_tables = []
for fk in foreign_keys:
count = (
foreign_table_counts.get((fk["other_table"], fk["other_column"])) or 0
)
key = fk["other_column"]
if key.startswith("_"):
key += "__exact"
link = "{}?{}={}".format(
self.ds.urls.table(database, fk["other_table"]),
key,
",".join(pk_values),
)
foreign_key_tables.append({**fk, **{"count": count, "link": link}})
return foreign_key_tables
| RowView |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/external_buildable_with_variant/package.py | {
"start": 217,
"end": 567
} | class ____(Package):
homepage = "http://somewhere.com"
url = "http://somewhere.com/module-1.0.tar.gz"
version("1.0", md5="1234567890abcdef1234567890abcdef")
version("0.9", md5="1234567890abcdef1234567890abcdef")
variant("baz", default=False, description="nope")
depends_on("pkg-c@1.0", when="@0.9")
| ExternalBuildableWithVariant |
python | networkx__networkx | networkx/classes/graph.py | {
"start": 2326,
"end": 72354
} | class ____:
"""
Base class for undirected graphs.
A Graph stores nodes and edges with optional data, or attributes.
Graphs hold undirected edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes, except that `None` is not allowed as a node.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be any format that is supported
by the to_networkx_graph() function, currently including edge list,
dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy
sparse matrix, or PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
DiGraph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.Graph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2, 3])
>>> G.add_nodes_from(range(100, 110))
>>> H = nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1, 2), (1, 3)])
or a collection of edges,
>>> G.add_edges_from(H.edges)
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.Graph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.nodes
>>> G.add_node(1, time="5pm")
>>> G.add_nodes_from([3], time="2pm")
>>> G.nodes[1]
{'time': '5pm'}
>>> G.nodes[1]["room"] = 714 # node must exist already to use G.nodes
>>> del G.nodes[1]["room"] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edges.
>>> G.add_edge(1, 2, weight=4.7)
>>> G.add_edges_from([(3, 4), (4, 5)], color="red")
>>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})])
>>> G[1][2]["weight"] = 4.7
>>> G.edges[1, 2]["weight"] = 4
Warning: we protect the graph data structure by making `G.edges` a
read-only dict-like structure. However, you can assign to attributes
in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
data attributes: `G.edges[1, 2]['weight'] = 4`
(For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n < 3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
Often the best way to traverse all edges of a graph is via the neighbors.
The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`
>>> for n, nbrsdict in G.adjacency():
... for nbr, eattr in nbrsdict.items():
... if "weight" in eattr:
... # Do something useful with the edges
... pass
But the edges() method is often more convenient:
>>> for u, v, weight in G.edges.data("weight"):
... if weight is not None:
... # Do something useful with the edges
... pass
**Reporting:**
Simple graph information is obtained using object-attributes and methods.
Reporting typically provides views instead of containers to reduce memory
usage. The views update as the graph is updated similarly to dict-views.
The objects `nodes`, `edges` and `adj` provide access to data attributes
via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration
(e.g. `nodes.items()`, `nodes.data('color')`,
`nodes.data('color', default='blue')` and similarly for `edges`)
Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge data keyed by neighbor. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced in a subclass by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure.
node_dict_factory : function, (default: dict)
Factory function to be used to create the dict containing node
attributes, keyed by node id.
It should require no arguments and return a dict-like object
node_attr_dict_factory: function, (default: dict)
Factory function to be used to create the node attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object
adjlist_outer_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_inner_dict_factory : function, (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, (default: dict)
Factory function to be used to create the edge attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
graph_attr_dict_factory : function, (default: dict)
Factory function to be used to create the graph attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Typically, if your extension doesn't impact the data structure all
methods will inherit without issue except: `to_directed/to_undirected`.
By default these methods create a DiGraph/Graph class and you probably
want them to create your extension of a DiGraph/Graph. To facilitate
this we define two class variables that you can set in your subclass.
to_directed_class : callable, (default: DiGraph or MultiDiGraph)
Class to create a new graph structure in the `to_directed` method.
If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
to_undirected_class : callable, (default: Graph or MultiGraph)
Class to create a new graph structure in the `to_undirected` method.
If `None`, a NetworkX class (Graph or MultiGraph) is used.
**Subclassing Example**
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {"weight": 1}
...
... def single_edge_dict(self):
... return self.all_edge_dict
...
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2, 1)
>>> G[2][1]
{'weight': 1}
>>> G.add_edge(2, 2)
>>> G[2][1] is G[2][2]
True
"""
__networkx_backend__ = "networkx"
_adj = _CachedPropertyResetterAdj()
_node = _CachedPropertyResetterNode()
node_dict_factory = dict
node_attr_dict_factory = dict
adjlist_outer_dict_factory = dict
adjlist_inner_dict_factory = dict
edge_attr_dict_factory = dict
graph_attr_dict_factory = dict
def to_directed_class(self):
"""Returns the class to use for empty directed copies.
If you subclass the base classes, use this to designate
what directed class to use for `to_directed()` copies.
"""
return nx.DiGraph
def to_undirected_class(self):
"""Returns the class to use for empty undirected copies.
If you subclass the base classes, use this to designate
what directed class to use for `to_directed()` copies.
"""
return Graph
# This __new__ method just does what Python itself does automatically.
# We include it here as part of the dispatchable/backend interface.
# If your goal is to understand how the graph classes work, you can ignore
# this method, even when subclassing the base classes. If you are subclassing
# in order to provide a backend that allows class instantiation, this method
# can be overridden to return your own backend graph class.
@nx._dispatchable(name="graph__new__", graphs=None, returns_graph=True)
def __new__(cls, incoming_graph_data=None, **attr):
return object.__new__(cls)
def __init__(self, incoming_graph_data=None, **attr):
"""Initialize a graph with edges, name, or graph attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a 2D NumPy array, a
SciPy sparse array, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name="my graph")
>>> e = [(1, 2), (2, 3), (3, 4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G = nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes
self._node = self.node_dict_factory() # empty node attribute dict
self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict
self.__networkx_cache__ = {}
# attempt to load graph with data
if incoming_graph_data is not None:
convert.to_networkx_graph(incoming_graph_data, create_using=self)
# load graph attributes (must be after convert)
attr.pop("backend", None) # Ignore explicit `backend="networkx"`
self.graph.update(attr)
@cached_property
def adj(self):
"""Graph adjacency object holding the neighbors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.adj behaves like a dict. Useful idioms include
`for nbr, datadict in G.adj[n].items():`.
The neighbor information is also provided by subscripting the graph.
So `for nbr, foovalue in G[node].data('foo', default=1):` works.
For directed graphs, `G.adj` holds outgoing (successor) info.
"""
return AdjacencyView(self._adj)
@property
def name(self):
"""String identifier of the graph.
This graph attribute appears in the attribute dict G.graph
keyed by the string `"name"`. as well as an attribute (technically
a property) `G.name`. This is entirely user controlled.
"""
return self.graph.get("name", "")
@name.setter
def name(self, s):
self.graph["name"] = s
nx._clear_cache(self)
def __str__(self):
"""Returns a short summary of the graph.
Returns
-------
info : string
Graph information including the graph name (if any), graph type, and the
number of nodes and edges.
Examples
--------
>>> G = nx.Graph(name="foo")
>>> str(G)
"Graph named 'foo' with 0 nodes and 0 edges"
>>> G = nx.path_graph(3)
>>> str(G)
'Graph with 3 nodes and 2 edges'
"""
return "".join(
[
type(self).__name__,
f" named {self.name!r}" if self.name else "",
f" with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges",
]
)
def __iter__(self):
"""Iterate over the nodes. Use: 'for n in G'.
Returns
-------
niter : iterator
An iterator over all nodes in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> [n for n in G]
[0, 1, 2, 3]
>>> list(G)
[0, 1, 2, 3]
"""
return iter(self._node)
def __contains__(self, n):
"""Returns True if n is a node, False otherwise. Use: 'n in G'.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> 1 in G
True
"""
try:
return n in self._node
except TypeError:
return False
def __len__(self):
"""Returns the number of nodes in the graph. Use: 'len(G)'.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
number_of_nodes: identical method
order: identical method
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> len(G)
4
"""
return len(self._node)
def __getitem__(self, n):
"""Returns a dict of neighbors of node n. Use: 'G[n]'.
Parameters
----------
n : node
A node in the graph.
Returns
-------
adj_dict : dictionary
The adjacency dictionary for nodes connected to n.
Notes
-----
G[n] is the same as G.adj[n] and similar to G.neighbors(n)
(which is an iterator over G.adj[n])
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G[0]
AtlasView({1: {}})
"""
return self.adj[n]
def add_node(self, node_for_adding, **attr):
"""Add a single node `node_for_adding` and update node attributes.
Parameters
----------
node_for_adding : node
A node can be any hashable Python object except None.
attr : keyword arguments, optional
Set or change node attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1, size=10)
>>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
if node_for_adding not in self._node:
if node_for_adding is None:
raise ValueError("None cannot be a node")
self._adj[node_for_adding] = self.adjlist_inner_dict_factory()
attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()
attr_dict.update(attr)
else: # update attr even if node already exists
self._node[node_for_adding].update(attr)
nx._clear_cache(self)
def add_nodes_from(self, nodes_for_adding, **attr):
"""Add multiple nodes.
Parameters
----------
nodes_for_adding : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple take
precedence over attributes specified via keyword arguments.
See Also
--------
add_node
Notes
-----
When adding nodes from an iterator over the graph you are changing,
a `RuntimeError` can be raised with message:
`RuntimeError: dictionary changed size during iteration`. This
happens when the graph's underlying dictionary is modified during
iteration. To avoid this error, evaluate the iterator into a separate
object, e.g. by using `list(iterator_of_nodes)`, and pass this
object to `G.add_nodes_from`.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(), key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1, 2], size=10)
>>> G.add_nodes_from([3, 4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific nodes.
>>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})])
>>> G.nodes[1]["size"]
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.nodes[1]["size"]
11
Evaluate an iterator over a graph if using it to modify the same graph
>>> G = nx.Graph([(0, 1), (1, 2), (3, 4)])
>>> # wrong way - will raise RuntimeError
>>> # G.add_nodes_from(n + 1 for n in G.nodes)
>>> # correct way
>>> G.add_nodes_from(list(n + 1 for n in G.nodes))
"""
for n in nodes_for_adding:
try:
newnode = n not in self._node
newdict = attr
except TypeError:
n, ndict = n
newnode = n not in self._node
newdict = attr.copy()
newdict.update(ndict)
if newnode:
if n is None:
raise ValueError("None cannot be a node")
self._adj[n] = self.adjlist_inner_dict_factory()
self._node[n] = self.node_attr_dict_factory()
self._node[n].update(newdict)
nx._clear_cache(self)
def remove_node(self, n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a nonexistent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> list(G.edges)
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> list(G.edges)
[]
"""
adj = self._adj
try:
nbrs = list(adj[n]) # list handles self-loops (allows mutation)
del self._node[n]
except KeyError as err: # NetworkXError if n not in self
raise NetworkXError(f"The node {n} is not in the graph.") from err
for u in nbrs:
del adj[u][n] # remove all edges n-u in graph
del adj[n] # now remove node
nx._clear_cache(self)
def remove_nodes_from(self, nodes):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Notes
-----
When removing nodes from an iterator over the graph you are changing,
a `RuntimeError` will be raised with message:
`RuntimeError: dictionary changed size during iteration`. This
happens when the graph's underlying dictionary is modified during
iteration. To avoid this error, evaluate the iterator into a separate
object, e.g. by using `list(iterator_of_nodes)`, and pass this
object to `G.remove_nodes_from`.
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = list(G.nodes)
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> list(G.nodes)
[]
Evaluate an iterator over a graph if using it to modify the same graph
>>> G = nx.Graph([(0, 1), (1, 2), (3, 4)])
>>> # this command will fail, as the graph's dict is modified during iteration
>>> # G.remove_nodes_from(n for n in G.nodes if n < 2)
>>> # this command will work, since the dictionary underlying graph is not modified
>>> G.remove_nodes_from(list(n for n in G.nodes if n < 2))
"""
adj = self._adj
for n in nodes:
try:
del self._node[n]
for u in list(adj[n]): # list handles self-loops
del adj[u][n] # (allows mutation of dict in loop)
del adj[n]
except KeyError:
pass
nx._clear_cache(self)
@cached_property
def nodes(self):
"""A NodeView of the Graph as G.nodes or G.nodes().
Can be used as `G.nodes` for data lookup and for set-like operations.
Can also be used as `G.nodes(data='color', default=None)` to return a
NodeDataView which reports specific node data but no set operations.
It presents a dict-like interface as well with `G.nodes.items()`
iterating over `(node, nodedata)` 2-tuples and `G.nodes[3]['foo']`
providing the value of the `foo` attribute for node `3`. In addition,
a view `G.nodes.data('foo')` provides a dict-like interface to the
`foo` attribute of each node. `G.nodes.data('foo', default=1)`
provides a default for nodes that do not have attribute `foo`.
Parameters
----------
data : string or bool, optional (default=False)
The node attribute returned in 2-tuple (n, ddict[data]).
If True, return entire node attribute dict as (n, ddict).
If False, return just the nodes n.
default : value, optional (default=None)
Value used for nodes that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
NodeView
Allows set-like operations over the nodes as well as node
attribute dict lookup and calling to get a NodeDataView.
A NodeDataView iterates over `(n, data)` and has no set operations.
A NodeView iterates over `n` and includes set operations.
When called, if data is False, an iterator over nodes.
Otherwise an iterator of 2-tuples (node, attribute value)
where the attribute is specified in `data`.
If data is True then the attribute becomes the
entire data dictionary.
Notes
-----
If your node data is not needed, it is simpler and equivalent
to use the expression ``for n in G``, or ``list(G)``.
Examples
--------
There are two simple ways of getting a list of all nodes in the graph:
>>> G = nx.path_graph(3)
>>> list(G.nodes)
[0, 1, 2]
>>> list(G)
[0, 1, 2]
To get the node data along with the nodes:
>>> G.add_node(1, time="5pm")
>>> G.nodes[0]["foo"] = "bar"
>>> list(G.nodes(data=True))
[(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})]
>>> list(G.nodes.data())
[(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})]
>>> list(G.nodes(data="foo"))
[(0, 'bar'), (1, None), (2, None)]
>>> list(G.nodes.data("foo"))
[(0, 'bar'), (1, None), (2, None)]
>>> list(G.nodes(data="time"))
[(0, None), (1, '5pm'), (2, None)]
>>> list(G.nodes.data("time"))
[(0, None), (1, '5pm'), (2, None)]
>>> list(G.nodes(data="time", default="Not Available"))
[(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')]
>>> list(G.nodes.data("time", default="Not Available"))
[(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')]
If some of your nodes have an attribute and the rest are assumed
to have a default attribute value you can create a dictionary
from node/attribute pairs using the `default` keyword argument
to guarantee the value is never None::
>>> G = nx.Graph()
>>> G.add_node(0)
>>> G.add_node(1, weight=2)
>>> G.add_node(2, weight=3)
>>> dict(G.nodes(data="weight", default=1))
{0: 1, 1: 2, 2: 3}
"""
return NodeView(self)
def number_of_nodes(self):
    """Return the number of nodes in the graph.

    Returns
    -------
    int
        How many nodes the graph currently holds.

    See Also
    --------
    order : identical method
    __len__ : identical method

    Examples
    --------
    >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.number_of_nodes()
    3
    """
    # The node-attribute dict has exactly one entry per node.
    return len(self._node)
def order(self):
    """Return the number of nodes in the graph (the graph "order").

    Returns
    -------
    int
        How many nodes the graph currently holds.

    See Also
    --------
    number_of_nodes : identical method
    __len__ : identical method

    Examples
    --------
    >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.order()
    3
    """
    # Alias of number_of_nodes(): both report the size of the node dict.
    return len(self._node)
def has_node(self, n):
    """Return True if node ``n`` is present in the graph.

    Identical to ``n in G``.

    Parameters
    ----------
    n : node

    Returns
    -------
    bool
        True when ``n`` is a node of this graph, False otherwise.
        Unhashable objects can never be nodes, so they yield False
        rather than raising.

    Examples
    --------
    >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.has_node(0)
    True
    >>> 0 in G  # simpler, equivalent form
    True
    """
    try:
        found = n in self._node
    except TypeError:
        # Unhashable input: cannot be a dict key, hence not a node.
        return False
    return found
def add_edge(self, u_of_edge, v_of_edge, **attr):
    """Add an edge between nodes u and v.

    Endpoints that are not yet in the graph are created automatically.
    Adding an edge that already exists updates its data dict in place.

    Parameters
    ----------
    u_of_edge, v_of_edge : nodes
        Hashable (non-None) Python objects.
    attr : keyword arguments, optional
        Edge data (labels, weights, ...) assigned via keywords.

    Raises
    ------
    ValueError
        If either endpoint is None.

    See Also
    --------
    add_edges_from : add a collection of edges

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edge(1, 2)
    >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
    """
    u, v = u_of_edge, v_of_edge
    # Ensure both endpoints exist before touching the adjacency dicts.
    for node in (u, v):
        if node not in self._node:
            if node is None:
                raise ValueError("None cannot be a node")
            self._adj[node] = self.adjlist_inner_dict_factory()
            self._node[node] = self.node_attr_dict_factory()
    # Reuse the existing data dict when the edge is already present so
    # attributes are merged rather than replaced.
    datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
    datadict.update(attr)
    # Undirected storage: both directions point at the same dict object.
    self._adj[u][v] = datadict
    self._adj[v][u] = datadict
    nx._clear_cache(self)
def add_edges_from(self, ebunch_to_add, **attr):
    """Add every edge in ``ebunch_to_add``.

    Parameters
    ----------
    ebunch_to_add : container of edges
        Edges given as 2-tuples (u, v) or 3-tuples (u, v, d) where d is
        a dictionary of edge data. Missing endpoints are created.
    attr : keyword arguments, optional
        Edge data applied to every added edge. Per-edge dicts in the
        ebunch take precedence over these keywords.

    Raises
    ------
    NetworkXError
        If an item of the ebunch is not a 2-tuple or 3-tuple.
    ValueError
        If an endpoint is None.

    See Also
    --------
    add_edge : add a single edge
    add_weighted_edges_from : convenient way to add weighted edges

    Notes
    -----
    Adding the same edge twice merges (updates) its data. Do not pass an
    iterator over the graph itself while mutating it; materialize it
    first, e.g. ``G.add_edges_from(list(iterator_of_edges))``.

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edges_from([(0, 1), (1, 2)])  # using a list of edge tuples
    >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
    """
    for e in ebunch_to_add:
        n_items = len(e)
        if n_items == 3:
            u, v, dd = e
        elif n_items == 2:
            u, v = e
            dd = {}  # no per-edge data supplied
        else:
            raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
        # Create any missing endpoint before wiring the edge.
        for node in (u, v):
            if node not in self._node:
                if node is None:
                    raise ValueError("None cannot be a node")
                self._adj[node] = self.adjlist_inner_dict_factory()
                self._node[node] = self.node_attr_dict_factory()
        # Merge keyword attrs first, then the per-edge dict (it wins).
        datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
        datadict.update(attr)
        datadict.update(dd)
        # Shared dict for both directions of the undirected edge.
        self._adj[u][v] = datadict
        self._adj[v][u] = datadict
    nx._clear_cache(self)
def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr):
    """Add the weighted edges in ``ebunch_to_add``.

    Parameters
    ----------
    ebunch_to_add : container of edges
        Edges given as 3-tuples (u, v, w) where w is a number.
    weight : string, optional (default='weight')
        The attribute name under which each w is stored.
    attr : keyword arguments, optional
        Extra edge attributes applied to every edge.

    See Also
    --------
    add_edge : add a single edge
    add_edges_from : add multiple edges

    Notes
    -----
    For Graph/DiGraph a repeated edge updates the existing data; for
    MultiGraph/MultiDiGraph duplicates are stored. Do not pass an
    iterator over the graph being modified; evaluate it into a list
    first.

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_weighted_edges_from([(0, 1, 3.0), (1, 2, 7.5)])
    """
    # Rewrap each (u, v, w) triple as (u, v, {weight: w}) and delegate.
    wrapped = ((u, v, {weight: d}) for u, v, d in ebunch_to_add)
    self.add_edges_from(wrapped, **attr)
    nx._clear_cache(self)
def remove_edge(self, u, v):
    """Remove the edge between u and v.

    Parameters
    ----------
    u, v : nodes
        The endpoints of the edge to delete.

    Raises
    ------
    NetworkXError
        If there is no edge between u and v.

    See Also
    --------
    remove_edges_from : remove a collection of edges

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, etc
    >>> G.remove_edge(0, 1)
    >>> e = (1, 2)
    >>> G.remove_edge(*e)  # unpacks e from an edge tuple
    """
    try:
        del self._adj[u][v]
        # A self-loop is stored only once, so remove the mirror entry
        # only for a proper edge.
        if u != v:
            del self._adj[v][u]
    except KeyError as missing:
        raise NetworkXError(f"The edge {u}-{v} is not in the graph") from missing
    nx._clear_cache(self)
def remove_edges_from(self, ebunch):
    """Remove every edge listed in ``ebunch``.

    Parameters
    ----------
    ebunch : list or container of edge tuples
        Edges as 2-tuples (u, v) or 3-tuples (u, v, k); any third item
        is ignored.

    See Also
    --------
    remove_edge : remove a single edge

    Notes
    -----
    Edges not present in the graph are skipped silently.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.remove_edges_from([(1, 2), (2, 3)])
    """
    adjacency = self._adj
    for edge in ebunch:
        u, v = edge[:2]  # any extra tuple items (e.g. data) are ignored
        if u in adjacency and v in adjacency[u]:
            del adjacency[u][v]
            # Proper edges are mirrored; self-loops are stored once.
            if u != v:
                del adjacency[v][u]
    nx._clear_cache(self)
def update(self, edges=None, nodes=None):
    """Update the graph using nodes/edges/graphs as input.

    Like ``dict.update``, this can take another graph, adding its nodes
    and edges to this graph. Alternatively it accepts a collection of
    edges, a collection of nodes (via the ``nodes`` keyword), or both.
    The collections are treated as in add_edges_from/add_nodes_from:
    when iterated they should yield 2-tuples (u, v) or 3-tuples
    (u, v, datadict).

    Parameters
    ----------
    edges : Graph object, collection of edges, or None
        If the first argument exposes ``nodes`` and ``edges``
        attributes, it is treated as a Graph-like object and those
        attributes supply the nodes and edges to add. Otherwise it is
        treated as a plain collection of edges. If None, no edges are
        added.
    nodes : collection of nodes, or None
        Nodes to add, unless None. Ignored when ``edges`` is a Graph.

    Raises
    ------
    NetworkXError
        If both ``edges`` and ``nodes`` are None.

    See Also
    --------
    add_edges_from : add multiple edges to a graph
    add_nodes_from : add multiple nodes to a graph

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> G.update(nx.complete_graph(range(4, 10)))
    >>> G.update(edges=[(10, 11)], nodes=[1000])
    """
    if edges is None:
        # Only nodes (or nothing) were supplied.
        if nodes is None:
            raise NetworkXError("update needs nodes or edges input")
        self.add_nodes_from(nodes)
        return
    if nodes is not None:
        # Explicit node and edge collections.
        self.add_nodes_from(nodes)
        self.add_edges_from(edges)
        return
    # Single argument: decide between a Graph-like object and a plain
    # edge collection by duck-typing on .nodes/.edges.
    try:
        graph_nodes = edges.nodes
        graph_edges = edges.edges
    except AttributeError:
        # Not Graph-like: treat as a collection of edges.
        self.add_edges_from(edges)
    else:
        self.add_nodes_from(graph_nodes.data())
        self.add_edges_from(graph_edges.data())
        self.graph.update(edges.graph)
def has_edge(self, u, v):
    """Return True if the edge (u, v) is in the graph.

    Equivalent to ``v in G[u]`` but never raises KeyError.

    Parameters
    ----------
    u, v : nodes
        Hashable (non-None) Python objects.

    Returns
    -------
    bool
        True if the edge exists, False otherwise (including when u is
        not a node of the graph).

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.has_edge(0, 1)  # using two nodes
    True
    >>> e = (0, 1)
    >>> G.has_edge(*e)  # e is a 2-tuple (u, v)
    True
    """
    try:
        nbrs = self._adj[u]
    except KeyError:
        # u is not a node, so no edge can start at it.
        return False
    return v in nbrs
def neighbors(self, n):
    """Return an iterator over all neighbors of node ``n``.

    Identical to ``iter(G[n])``.

    Parameters
    ----------
    n : node
        A node in the graph.

    Returns
    -------
    iterator
        Iterator over the neighbors of ``n``.

    Raises
    ------
    NetworkXError
        If ``n`` is not in the graph.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> [n for n in G.neighbors(0)]
    [1]
    """
    try:
        nbrs = self._adj[n]
    except KeyError as err:
        raise NetworkXError(f"The node {n} is not in the graph.") from err
    return iter(nbrs)
@cached_property
def edges(self):
    """An EdgeView of the Graph as G.edges or G.edges().

    edges(self, nbunch=None, data=False, default=None)

    The EdgeView offers set-like operations on edge tuples plus edge
    attribute lookup (``G.edges[u, v]['color']``). Calling it returns an
    EdgeDataView that controls access to edge attributes, e.g.
    ``for (u, v, c) in G.edges.data('color', default='red'):``.

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Restrict the reported edges to those incident to these nodes.
    data : string or bool, optional (default=False)
        If a string, yield 3-tuples (u, v, ddict[data]); if True, yield
        (u, v, ddict); if False, yield 2-tuples (u, v).
    default : value, optional (default=None)
        Value used for edges lacking the requested attribute. Only
        relevant when ``data`` is a string.

    Returns
    -------
    EdgeView
        A view iterating over (u, v) or (u, v, d) tuples that also
        supports attribute lookup as ``edges[u, v]['foo']``.

    Notes
    -----
    Nodes in nbunch that are not in the graph are (quietly) ignored.
    For directed graphs this reports the out-edges.

    Examples
    --------
    >>> G = nx.path_graph(3)  # or MultiGraph, etc
    >>> G.add_edge(2, 3, weight=5)
    >>> [e for e in G.edges]
    [(0, 1), (1, 2), (2, 3)]
    >>> G.edges.data("weight", default=1)
    EdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)])
    >>> G.edges([0, 3])  # only edges from these nodes
    EdgeDataView([(0, 1), (3, 2)])
    """
    # Cached: the view object is created once per graph instance.
    return EdgeView(self)
def get_edge_data(self, u, v, default=None):
    """Return the attribute dictionary of edge (u, v).

    Identical to ``G[u][v]`` except that ``default`` is returned
    instead of raising when the edge does not exist.

    Parameters
    ----------
    u, v : nodes
    default : any Python object (default=None)
        Value to return when edge (u, v) is not found.

    Returns
    -------
    dictionary
        The edge attribute dictionary (shared with the graph, so
        in-place mutation is visible), or ``default``.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.get_edge_data(0, 1)  # default edge data is {}
    {}
    >>> G.get_edge_data("a", "b", default=0)  # edge not in graph, return 0
    0
    """
    adj = self._adj
    # Both endpoint and neighbor must be present for the edge to exist.
    if u in adj and v in adj[u]:
        return adj[u][v]
    return default
def adjacency(self):
    """Return an iterator of (node, adjacency dict) pairs for all nodes.

    For directed graphs, only outgoing neighbors/adjacencies appear.

    Returns
    -------
    iterator
        Iterator over (node, adjacency dictionary) for every node.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> [(n, nbrdict) for n, nbrdict in G.adjacency()]
    [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
    """
    node_to_nbrs = self._adj.items()
    return iter(node_to_nbrs)
@cached_property
def degree(self):
    """A DegreeView for the Graph as G.degree or G.degree().

    The degree of a node is the number of edges adjacent to it; the
    weighted degree is the sum of the incident edge weights. The view
    iterates (node, degree) pairs and supports single-node lookup.

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Restrict the report to edges incident to these nodes.
    weight : string or None, optional (default=None)
        Name of the edge attribute holding the numerical weight.
        If None, every edge counts as weight 1.

    Returns
    -------
    DegreeView or int
        For multiple nodes (the default), a DegreeView mapping nodes to
        degrees; for a single node, that node's degree as an integer.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.degree[0]  # node 0 has degree 1
    1
    >>> list(G.degree([0, 1, 2]))
    [(0, 1), (1, 2), (2, 2)]
    """
    # Cached: the view object is created once per graph instance.
    return DegreeView(self)
def clear(self):
    """Remove all nodes and edges from the graph.

    Also clears the graph-level attributes (including the name) and all
    node and edge attributes.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.clear()
    >>> list(G.nodes)
    []
    >>> list(G.edges)
    []
    """
    # Empty every backing container: adjacency, node attrs, graph attrs.
    for container in (self._adj, self._node, self.graph):
        container.clear()
    nx._clear_cache(self)
def clear_edges(self):
    """Remove all edges from the graph while keeping the nodes.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.clear_edges()
    >>> list(G.nodes)
    [0, 1, 2, 3]
    >>> list(G.edges)
    []
    """
    # Keep each node's (empty) adjacency dict so the node set survives.
    for neighbor_dict in self._adj.values():
        neighbor_dict.clear()
    nx._clear_cache(self)
def is_multigraph(self):
    """Return True if the graph is a multigraph, False otherwise.

    This class stores at most one edge per node pair, so it is not a
    multigraph.
    """
    return False
def is_directed(self):
    """Return True if the graph is directed, False otherwise.

    Edges here are stored symmetrically, so the graph is undirected.
    """
    return False
def copy(self, as_view=False):
    """Return a copy of the graph.

    By default this is an *independent shallow* copy: the graph
    structure and the attribute dicts are new objects, but attribute
    *values* that are containers are shared with the original (exactly
    what ``dict.copy()`` provides). Use ``copy.deepcopy`` for fully
    independent containers. With ``as_view=True`` a read-only view of
    the original graph is returned instead, copying no data at all.

    Other copy flavors and how to obtain them:

    * Deepcopy -- ``copy.deepcopy(G)`` copies structure, attributes and
      any objects they contain.
    * Data Reference (Shallow) -- sharing the attribute dicts
      themselves is not provided by NetworkX.
    * Fresh Data -- structure only, with new empty attribute dicts::

          H = G.__class__()
          H.add_nodes_from(G)
          H.add_edges_from(G.edges)

    See the Python copy module for more information on shallow
    and deep copies, https://docs.python.org/3/library/copy.html.

    Parameters
    ----------
    as_view : bool, optional (default=False)
        If True, return a read-only graph-view of the original graph.

    Returns
    -------
    G : Graph
        A copy of the graph.

    See Also
    --------
    to_directed: return a directed copy of the graph.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> H = G.copy()
    """
    if as_view is True:
        return nx.graphviews.generic_graph_view(self)
    # Build a fresh graph of the same class, shallow-copying each
    # attribute dict so the copy's dicts are independent objects.
    H = self.__class__()
    H.graph.update(self.graph)
    node_data = ((n, d.copy()) for n, d in self._node.items())
    H.add_nodes_from(node_data)
    edge_data = (
        (u, v, datadict.copy())
        for u, nbrs in self._adj.items()
        for v, datadict in nbrs.items()
    )
    H.add_edges_from(edge_data)
    return H
def to_directed(self, as_view=False):
    """Return a directed representation of the graph.

    Returns
    -------
    G : DiGraph
        A directed graph with the same name and nodes, where each
        undirected edge (u, v, data) becomes the two directed edges
        (u, v, data) and (v, u, data).

    Notes
    -----
    Unless ``as_view`` is True this produces a "deepcopy" of edge, node
    and graph attributes — in contrast to ``D = DiGraph(G)``, which
    shallow-copies the data. See the Python copy module for details,
    https://docs.python.org/3/library/copy.html.

    Warning: If you have subclassed Graph to use dict-like objects
    in the data structure, those changes do not transfer to the
    DiGraph created by this method.

    Examples
    --------
    >>> G = nx.Graph()  # or MultiGraph, etc
    >>> G.add_edge(0, 1)
    >>> H = G.to_directed()
    >>> list(H.edges)
    [(0, 1), (1, 0)]
    """
    directed_cls = self.to_directed_class()
    if as_view is True:
        return nx.graphviews.generic_graph_view(self, directed_cls)
    # Not a view: deep-copy every graph/node/edge attribute.
    H = directed_cls()
    H.graph.update(deepcopy(self.graph))
    node_data = ((n, deepcopy(d)) for n, d in self._node.items())
    H.add_nodes_from(node_data)
    # Each stored (u, v) entry becomes a directed edge; the symmetric
    # (v, u) entry in _adj supplies the reverse direction.
    edge_data = (
        (u, v, deepcopy(data))
        for u, nbrs in self._adj.items()
        for v, data in nbrs.items()
    )
    H.add_edges_from(edge_data)
    return H
def to_undirected(self, as_view=False):
    """Return an undirected copy of the graph.

    Parameters
    ----------
    as_view : bool (optional, default=False)
        If True return a view of the original undirected graph.

    Returns
    -------
    G : Graph/MultiGraph
        A deepcopy of the graph.

    See Also
    --------
    Graph, copy, add_edge, add_edges_from

    Notes
    -----
    Unless ``as_view`` is True this produces a "deepcopy" of edge, node
    and graph attributes — in contrast to ``G = nx.DiGraph(D)``, which
    shallow-copies the data. See the Python copy module for details,
    https://docs.python.org/3/library/copy.html.

    Warning: If you have subclassed DiGraph to use dict-like objects
    in the data structure, those changes do not transfer to the
    Graph created by this method.

    Examples
    --------
    >>> G = nx.path_graph(2)  # or MultiGraph, etc
    >>> H = G.to_directed()
    >>> G2 = H.to_undirected()
    >>> list(G2.edges)
    [(0, 1)]
    """
    undirected_cls = self.to_undirected_class()
    if as_view is True:
        return nx.graphviews.generic_graph_view(self, undirected_cls)
    # Not a view: deep-copy every graph/node/edge attribute.
    H = undirected_cls()
    H.graph.update(deepcopy(self.graph))
    node_data = ((n, deepcopy(d)) for n, d in self._node.items())
    H.add_nodes_from(node_data)
    edge_data = (
        (u, v, deepcopy(d))
        for u, nbrs in self._adj.items()
        for v, d in nbrs.items()
    )
    H.add_edges_from(edge_data)
    return H
def subgraph(self, nodes):
    """Return a SubGraph view of the subgraph induced on ``nodes``.

    The induced subgraph contains the nodes in ``nodes`` and the edges
    between those nodes.

    Parameters
    ----------
    nodes : list, iterable
        A container of nodes, iterated through once. Nodes not in the
        graph are quietly ignored.

    Returns
    -------
    G : SubGraph View
        A read-only-structure view: the topology cannot be changed
        through the view, but node/edge attributes are shared with (and
        mutate) the original graph.

    Notes
    -----
    To get a subgraph with its own copy of the attributes use
    ``G.subgraph(nodes).copy()``. For an in-place reduction instead
    remove the complement: ``G.remove_nodes_from([n for n in G if n not
    in set(nodes)])``. When you need more than a quick look at the
    induced edges, building an independent graph is usually better::

        # Create a subgraph SG based on a (possibly multigraph) G
        SG = G.__class__()
        SG.add_nodes_from((n, G.nodes[n]) for n in largest_wcc)
        if SG.is_multigraph():
            SG.add_edges_from(
                (n, nbr, key, d)
                for n, nbrs in G.adj.items()
                if n in largest_wcc
                for nbr, keydict in nbrs.items()
                if nbr in largest_wcc
                for key, d in keydict.items()
            )
        else:
            SG.add_edges_from(
                (n, nbr, d)
                for n, nbrs in G.adj.items()
                if n in largest_wcc
                for nbr, d in nbrs.items()
                if nbr in largest_wcc
            )
        SG.graph.update(G.graph)

    Subgraphs are not guaranteed to preserve the node or edge order of
    the original graph.

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> H = G.subgraph([0, 1, 2])
    >>> list(H.edges)
    [(0, 1), (1, 2)]
    """
    induced_nodes = nx.filters.show_nodes(self.nbunch_iter(nodes))
    # If self is already a subgraph view, filter the underlying graph
    # directly instead of stacking another view on top of this one.
    if hasattr(self, "_NODE_OK"):
        return nx.subgraph_view(
            self._graph, filter_node=induced_nodes, filter_edge=self._EDGE_OK
        )
    return nx.subgraph_view(self, filter_node=induced_nodes)
def edge_subgraph(self, edges):
    """Return the subgraph induced by the specified edges.

    The result contains each edge in ``edges`` and every node incident
    to one of those edges.

    Parameters
    ----------
    edges : iterable
        An iterable of edges in this graph.

    Returns
    -------
    G : Graph
        An edge-induced, read-only subgraph view whose graph, edge and
        node attributes are references to those of the original graph.

    Notes
    -----
    Use ``G.edge_subgraph(edges).copy()`` for a full graph with its own
    copy of the node/edge attributes.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> H = G.edge_subgraph([(0, 1), (3, 4)])
    >>> list(H.nodes)
    [0, 1, 3, 4]
    >>> list(H.edges)
    [(0, 1), (3, 4)]
    """
    # Delegate to the generic function, which handles all graph types.
    return nx.edge_subgraph(self, edges)
def size(self, weight=None):
    """Return the number of edges or the total of all edge weights.

    Parameters
    ----------
    weight : string or None, optional (default=None)
        Edge attribute holding the numerical weight. If None, every
        edge counts as 1.

    Returns
    -------
    numeric
        The edge count (an int) when ``weight`` is None; otherwise the
        total weight sum as a float (or more general numeric type).

    See Also
    --------
    number_of_edges

    Examples
    --------
    >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.size()
    3
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edge("a", "b", weight=2)
    >>> G.add_edge("b", "c", weight=4)
    >>> G.size(weight="weight")
    6.0
    """
    # Each edge contributes to the degree of both endpoints, so the
    # degree total is exactly twice the (weighted) edge total.
    total = sum(d for v, d in self.degree(weight=weight))
    if weight is None:
        # Unweighted degree sum is guaranteed even: integer division
        # keeps the result an int.
        return total // 2
    return total / 2
def number_of_edges(self, u=None, v=None):
    """Return the number of edges between two nodes.

    Parameters
    ----------
    u, v : nodes, optional (default=all edges)
        If both are given, count the edges between u and v (for a
        directed graph, only those from u to v). Otherwise count all
        edges in the graph.

    Returns
    -------
    int
        The requested edge count.

    See Also
    --------
    size

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> G.number_of_edges()
    3
    >>> G.number_of_edges(0, 1)
    1
    """
    if u is None:
        # No endpoints given: total edge count for the whole graph.
        return int(self.size())
    # This graph class stores at most one edge per node pair.
    return 1 if v in self._adj[u] else 0
def nbunch_iter(self, nbunch=None):
    """Return an iterator over the nodes of ``nbunch`` that are in the graph.

    Nodes of an iterable nbunch that are not in the graph are silently
    skipped.

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        The selection of nodes to report.

    Returns
    -------
    iterator
        Iterator over the members of nbunch that are graph nodes, or
        over all graph nodes when nbunch is None.

    Raises
    ------
    NetworkXError
        If nbunch is neither a node nor a sequence of nodes, or if any
        object in nbunch is not hashable.

    See Also
    --------
    Graph.__iter__

    Notes
    -----
    When nbunch is an iterator the returned iterator consumes it
    directly, exhausting it. ``if nbunch in self:`` still works on a
    single node even after processing with this routine.
    """
    if nbunch is None:
        # No selection: every node in the graph.
        return iter(self._adj)
    if nbunch in self:
        # A single node: wrap it in a one-element iterator.
        return iter([nbunch])

    # A sequence/iterator of candidate nodes: lazily filter it, turning
    # the TypeErrors that bad input produces into NetworkXErrors.
    def filtered(nlist, adj):
        try:
            for n in nlist:
                if n in adj:
                    yield n
        except TypeError as err:
            exc, message = err, err.args[0]
            # NOTE: the checks below must stay in this order — a later
            # match deliberately overrides an earlier one (e.g. "object
            # is not iterable" also contains the substring "iter").
            # non-sequence/iterator nbunch
            if "iter" in message:
                exc = NetworkXError("nbunch is not a node or a sequence of nodes.")
            # a single node that is not in the graph
            if "object is not iterable" in message:
                exc = NetworkXError(f"Node {nbunch} is not in the graph.")
            # an unhashable entry inside the sequence
            if "hashable" in message:
                exc = NetworkXError(f"Node {n} in sequence nbunch is not a valid node.")
            raise exc

    return filtered(nbunch, self._adj)
| Graph |
python | marshmallow-code__marshmallow | tests/test_fields.py | {
"start": 11737,
"end": 18304
} | class ____:
@pytest.mark.parametrize("param", ("only", "exclude", "dump_only", "load_only"))
def test_list_nested_only_exclude_dump_only_load_only_propagated_to_nested(
self, param
):
class Child(Schema):
name = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.List(fields.Nested(Child))
schema = Family(**{param: ["children.name"]}) # type: ignore[arg-type]
children_field = schema.fields["children"]
assert isinstance(children_field, fields.List)
assert isinstance(children_field.inner, fields.Nested)
assert getattr(children_field.inner.schema, param) == {"name"}
@pytest.mark.parametrize(
("param", "expected_attribute", "expected_dump"),
(
("only", {"name"}, {"children": [{"name": "Lily"}]}),
("exclude", {"name", "surname", "age"}, {"children": [{}]}),
),
)
def test_list_nested_class_only_and_exclude_merged_with_nested(
self, param, expected_attribute, expected_dump
):
class Child(Schema):
name = fields.String()
surname = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.List(fields.Nested(Child, **{param: ("name", "surname")})) # type: ignore[arg-type]
schema = Family(**{param: ["children.name", "children.age"]}) # type: ignore[arg-type]
children_field = schema.fields["children"]
assert isinstance(children_field, fields.List)
assert getattr(children_field.inner, param) == expected_attribute
family = {"children": [{"name": "Lily", "surname": "Martinez", "age": 15}]}
assert schema.dump(family) == expected_dump
def test_list_nested_class_multiple_dumps(self):
class Child(Schema):
name = fields.String()
surname = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.List(fields.Nested(Child, only=("name", "age")))
family = {"children": [{"name": "Lily", "surname": "Martinez", "age": 15}]}
assert Family(only=("children.age",)).dump(family) == {
"children": [{"age": 15}]
}
assert Family(only=("children.name",)).dump(family) == {
"children": [{"name": "Lily"}]
}
@pytest.mark.parametrize(
("param", "expected_attribute", "expected_dump"),
(
("only", {"name"}, {"children": [{"name": "Lily"}]}),
("exclude", {"name", "surname", "age"}, {"children": [{}]}),
),
)
def test_list_nested_instance_only_and_exclude_merged_with_nested(
self, param, expected_attribute, expected_dump
):
class Child(Schema):
name = fields.String()
surname = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.List(fields.Nested(Child(**{param: ("name", "surname")}))) # type: ignore[arg-type]
schema = Family(**{param: ["children.name", "children.age"]}) # type: ignore[arg-type]
children_field = schema.fields["children"]
assert isinstance(children_field, fields.List)
assert isinstance(children_field.inner, fields.Nested)
assert getattr(children_field.inner.schema, param) == expected_attribute
family = {"children": [{"name": "Lily", "surname": "Martinez", "age": 15}]}
assert schema.dump(family) == expected_dump
def test_list_nested_instance_multiple_dumps(self):
class Child(Schema):
name = fields.String()
surname = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.List(fields.Nested(Child(only=("name", "age"))))
family = {"children": [{"name": "Lily", "surname": "Martinez", "age": 15}]}
assert Family(only=("children.age",)).dump(family) == {
"children": [{"age": 15}]
}
assert Family(only=("children.name",)).dump(family) == {
"children": [{"name": "Lily"}]
}
@pytest.mark.parametrize(
("param", "expected_attribute", "expected_dump"),
(
("only", {"name"}, {"children": [{"name": "Lily"}]}),
("exclude", {"name", "surname", "age"}, {"children": [{}]}),
),
)
def test_list_nested_lambda_only_and_exclude_merged_with_nested(
self, param, expected_attribute, expected_dump
):
class Child(Schema):
name = fields.String()
surname = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.List(
fields.Nested(lambda: Child(**{param: ("name", "surname")})) # type: ignore[arg-type]
)
schema = Family(**{param: ["children.name", "children.age"]}) # type: ignore[arg-type]
children_field = schema.fields["children"]
assert isinstance(children_field, fields.List)
assert isinstance(children_field.inner, fields.Nested)
assert getattr(children_field.inner.schema, param) == expected_attribute
family = {"children": [{"name": "Lily", "surname": "Martinez", "age": 15}]}
assert schema.dump(family) == expected_dump
def test_list_nested_partial_propagated_to_nested(self):
class Child(Schema):
name = fields.String(required=True)
age = fields.Integer(required=True)
class Family(Schema):
children = fields.List(fields.Nested(Child))
payload = {"children": [{"name": "Lucette"}]}
for val in (True, ("children.age",)):
result = Family(partial=val).load(payload)
assert result["children"][0]["name"] == "Lucette"
result = Family().load(payload, partial=val)
assert result["children"][0]["name"] == "Lucette"
for val in (False, ("children.name",)):
with pytest.raises(ValidationError) as excinfo:
result = Family(partial=val).load(payload)
assert excinfo.value.args[0] == {
"children": {0: {"age": ["Missing data for required field."]}}
}
with pytest.raises(ValidationError) as excinfo:
result = Family().load(payload, partial=val)
assert excinfo.value.args[0] == {
"children": {0: {"age": ["Missing data for required field."]}}
}
| TestListNested |
python | google__pytype | pytype/rewrite/flow/state_test.py | {
"start": 2532,
"end": 6220
} | class ____(unittest.TestCase):
def test_merge_into_none(self):
b1 = state.BlockState({})
b2 = b1.merge_into(None)
self.assertIsNot(b1, b2)
self.assertFalse(b2._locals)
self.assertIs(b2._condition, conditions.TRUE)
def test_merge_into_other(self):
c1 = FakeCondition('a')
c2 = FakeCondition('b')
b1 = state.BlockState({}, c1)
b2 = state.BlockState({}, c2)
b3 = b1.merge_into(b2)
self.assertFalse(b3._locals)
self.assertEqual(b3._condition, conditions.Or(c1, c2))
def test_same_variable(self):
var = variables.Variable.from_value(42)
condition1 = FakeCondition('a')
condition2 = FakeCondition('b')
b1 = state.BlockState({'x': var}, condition1)
b2 = state.BlockState({'x': var}, condition2)
b3 = b1.merge_into(b2)
self.assertEqual(b3._locals, {'x': var})
self.assertEqual(b3._condition, conditions.Or(condition1, condition2))
self.assertEqual(b3._locals_with_block_condition, {'x'})
def test_add_block_condition_self(self):
condition = FakeCondition('a')
b1 = state.BlockState({'x': variables.Variable.from_value(42)}, condition)
b2 = state.BlockState({})
b3 = b1.merge_into(b2)
self.assertEqual(set(b3._locals), {'x'})
x = b3._locals['x']
self.assertEqual(len(x.bindings), 1)
self.assertEqual(x.bindings[0], variables.Binding(42, condition))
self.assertIs(b3._condition, conditions.TRUE)
self.assertFalse(b3._locals_with_block_condition)
def test_add_block_condition_other(self):
condition = FakeCondition('a')
b1 = state.BlockState({})
b2 = state.BlockState({'x': variables.Variable.from_value(42)}, condition)
b3 = b1.merge_into(b2)
self.assertEqual(set(b3._locals), {'x'})
x = b3._locals['x']
self.assertEqual(len(x.bindings), 1)
self.assertEqual(x.bindings[0], variables.Binding(42, condition))
self.assertIs(b3._condition, conditions.TRUE)
self.assertFalse(b3._locals_with_block_condition)
def test_noadd_block_condition(self):
condition1 = FakeCondition('a')
var1 = variables.Variable.from_value(42)
b1 = state.BlockState({'x': var1}, condition1,
locals_with_block_condition=set())
condition2 = FakeCondition('b')
var2 = variables.Variable.from_value(3.14)
b2 = state.BlockState({'y': var2}, condition2,
locals_with_block_condition=set())
b3 = b1.merge_into(b2)
self.assertEqual(set(b3._locals), {'x', 'y'})
x = b3._locals['x']
y = b3._locals['y']
self.assertIs(x, var1)
self.assertIs(y, var2)
def test_merge_bindings(self):
var1 = variables.Variable.from_value(42)
b1 = state.BlockState({'x': var1})
var2 = variables.Variable.from_value(3.14)
b2 = state.BlockState({'x': var2})
b3 = b1.merge_into(b2)
self.assertEqual(set(b3._locals), {'x'})
x = b3._locals['x']
self.assertCountEqual(
x.bindings, [variables.Binding(42), variables.Binding(3.14)])
def test_merge_conditions(self):
condition1 = FakeCondition('a')
condition2 = FakeCondition('b')
b1 = state.BlockState(
{'x': variables.Variable((variables.Binding(42, condition1),))},
conditions.TRUE)
b2 = state.BlockState(
{'x': variables.Variable((variables.Binding(42, condition2),))},
conditions.TRUE)
b3 = b1.merge_into(b2)
self.assertEqual(set(b3._locals), {'x'})
x = b3._locals['x']
self.assertEqual(len(x.bindings), 1)
self.assertEqual(x.bindings[0].value, 42)
self.assertEqual(x.bindings[0].condition,
conditions.Or(condition1, condition2))
self.assertEqual(b3._condition, conditions.TRUE)
| MergeIntoTest |
python | getsentry__sentry | src/sentry/audit_log/events.py | {
"start": 591,
"end": 1061
} | class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(event_id=2, name="MEMBER_ADD", api_name="member.add")
def render(self, audit_log_entry: AuditLogEntry) -> str:
if audit_log_entry.target_user == audit_log_entry.actor:
return "joined the organization"
member = _get_member_display(audit_log_entry.data.get("email"), audit_log_entry.target_user)
return f"add member {member}"
| MemberAddAuditLogEvent |
python | PrefectHQ__prefect | src/prefect/context.py | {
"start": 25966,
"end": 26420
} | class ____(ContextModel):
"""
The context for `prefect.tags` management.
Attributes:
current_tags: A set of current tags in the context
"""
current_tags: set[str] = Field(default_factory=set)
@classmethod
def get(cls) -> Self:
# Return an empty `TagsContext` instead of `None` if no context exists
return cls.__var__.get(cls())
__var__: ClassVar[ContextVar[Self]] = ContextVar("tags")
| TagsContext |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/common/test_exceptions.py | {
"start": 16124,
"end": 18297
} | class ____:
@pytest.mark.parametrize(
"cause",
[
RuntimeError("Error during Dag serialization process"),
KeyError("required_field"),
ValueError("Missing Dag ID in serialized Dag"),
],
ids=[
"RuntimeError",
"KeyError",
"ValueError",
],
)
def test_handle_deserialization_error(self, cause: Exception) -> None:
deserialization_error = DeserializationError("test_dag_id")
deserialization_error.__cause__ = cause
expected_exception = HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"An error occurred while trying to deserialize Dag: {deserialization_error}",
)
with pytest.raises(HTTPException, match=re.escape(expected_exception.detail)):
DagErrorHandler().exception_handler(Mock(), deserialization_error)
@pytest.mark.usefixtures("testing_dag_bundle")
@pytest.mark.need_serialized_dag
def test_handle_real_dag_deserialization_error(self, session: Session, dag_maker: DagMaker) -> None:
"""Test handling a real Dag deserialization error with actual serialized Dag."""
dag_id = "test_dag"
with dag_maker(dag_id, serialized=True):
EmptyOperator(task_id="task_1")
s_dag_model = session.scalar(select(SerializedDagModel).where(SerializedDagModel.dag_id == dag_id))
assert s_dag_model is not None
assert s_dag_model.data is not None
data = s_dag_model.data
del data["dag"]["dag_id"]
session.execute(
update(SerializedDagModel).where(SerializedDagModel.dag_id == dag_id).values(_data=data)
)
session.commit()
dag_bag = DBDagBag()
with pytest.raises(DeserializationError) as exc_info:
dag_bag.get_latest_version_of_dag(dag_id, session=session)
with pytest.raises(
HTTPException,
match=re.escape(f"An error occurred while trying to deserialize Dag: {exc_info.value}"),
):
DagErrorHandler().exception_handler(Mock(), exc_info.value)
| TestDagErrorHandler |
python | doocs__leetcode | solution/2300-2399/2340.Minimum Adjacent Swaps to Make a Valid Array/Solution.py | {
"start": 0,
"end": 344
} | class ____:
def minimumSwaps(self, nums: List[int]) -> int:
i = j = 0
for k, v in enumerate(nums):
if v < nums[i] or (v == nums[i] and k < i):
i = k
if v >= nums[j] or (v == nums[j] and k > j):
j = k
return 0 if i == j else i + len(nums) - 1 - j - (i > j)
| Solution |
python | zarr-developers__zarr-python | src/zarr/core/array_spec.py | {
"start": 504,
"end": 902
} | class ____(TypedDict):
"""
A TypedDict model of the attributes of an ArrayConfig class, but with no required fields.
This allows for partial construction of an ArrayConfig, with the assumption that the unset
keys will be taken from a global configuration.
"""
order: NotRequired[MemoryOrder]
write_empty_chunks: NotRequired[bool]
@dataclass(frozen=True)
| ArrayConfigParams |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 825544,
"end": 825751
} | class ____(
sgqlc.types.Type, TeamAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ()
| OrgRestoreMemberMembershipTeamAuditEntryData |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/texteditor.py | {
"start": 857,
"end": 929
} | class ____:
Close = 'close'
Copy = 'copy_action'
| TextEditorActions |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/dagster_run.py | {
"start": 26539,
"end": 26949
} | class ____:
"""Kept here to maintain loading of PipelineRuns from when it was still alive."""
name: str
solid_subset: Optional[Sequence[str]] = None
def assets_are_externally_managed(run: DagsterRun) -> bool:
from dagster._core.storage.tags import EXTERNALLY_MANAGED_ASSETS_TAG
return get_boolean_tag_value(run.tags.get(EXTERNALLY_MANAGED_ASSETS_TAG), default_value=False)
| ExecutionSelector |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_integration.py | {
"start": 5954,
"end": 7132
} | class ____:
"""Test error handling and edge cases."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
def test_nonexistent_symbol_error_handling(self):
"""Test error handling for nonexistent symbols."""
result = self.runner.invoke(
main, ["check", "docstrings", "--symbol", "completely.nonexistent.symbol"]
)
assert result.exit_code == 1
assert "Error:" in result.output or "ERRORS:" in result.output or "✗" in result.output
def test_nonexistent_package_error_handling(self):
"""Test error handling for nonexistent packages."""
result = self.runner.invoke(
main, ["ls", "symbols", "--package", "completely.nonexistent.package"]
)
assert result.exit_code == 1
assert "Error: Could not import package" in result.output
def test_malformed_symbol_error_handling(self):
"""Test error handling for malformed symbol names."""
result = self.runner.invoke(main, ["check", "docstrings", "--symbol", ""])
assert result.exit_code == 1
assert "Error:" in result.output
| TestErrorHandling |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_mixed_precision.py | {
"start": 8818,
"end": 20721
} | class ____(FSDPTest):
@property
def world_size(self):
raise ValueError("To be implemented by child classes")
def _get_simple_nested_model(
self, param_dtype, run_checks, *fsdp_args, **fsdp_kwargs
):
model = FSDP(
nn.Sequential(
FSDP(
LinearMixedPrecision(
param_dtype, buffer_name="buffer0", run_checks=run_checks
).cuda(),
*fsdp_args,
**fsdp_kwargs,
),
LinearMixedPrecision(
param_dtype, buffer_name="buffer1", run_checks=run_checks
).cuda(),
),
*fsdp_args,
**fsdp_kwargs,
)
return model
def _get_simple_model(self, param_dtype, *fsdp_args, **fsdp_kwargs):
model = FSDP(
LinearMixedPrecision(param_dtype).cuda(), *fsdp_args, **fsdp_kwargs
)
return model
def _validate_no_mp_shard(self, fsdp_model):
"""
Validates that there is no mixed precision _mp_shard allocated
when it is not expected to be.
"""
fsdp_units = FSDP.fsdp_modules(fsdp_model)
for fsdp in fsdp_units:
for param in fsdp.params:
self.assertFalse(hasattr(param, "_mp_shard"))
def _validate_mp_shard_freed(self, fsdp_model):
"""
Ensures that the mixed precision shard is greed for all FSDP units.
"""
fsdp_units = FSDP.fsdp_modules(fsdp_model)
for fsdp in fsdp_units:
for param in fsdp.params:
self.assertEqual(0, param._mp_shard.untyped_storage().size())
def _reduce_scatter_validate_mp(
self, orig_reduce_scatter, mp_config, should_run_low_prec, *args, **kwargs
):
"""
Runs reduce-scatter but verifies mixed precision settings before. This
is to test mixed precision is working as expected during backward pass.
In particular it ensures that the gradients were cast to the right type
and comm. is going to happen in the right type.
"""
tensors = []
for x in args:
if isinstance(x, torch.Tensor):
tensors.append(x)
for x in kwargs.values():
if isinstance(x, torch.Tensor):
tensors.append(x)
# reduce_dtype has higher priority than param_dtype, because mixed_precision
# supports overriding param_dtype with reduce_dtype to control the
# reduction precision. In the case where reduce_dtype == param_dtype
# this tests that gradients are in the expected precision as well.
# If reduce_dtype is not specified (is None) we comm. in the param_dtype
# if that is specified, otherwise full precision dtype.
if should_run_low_prec:
expected_dtype = (
mp_config.reduce_dtype
if mp_config.reduce_dtype is not None
else (
mp_config.param_dtype
if mp_config.param_dtype is not None
else _CURRENT_FULL_PRECISION_PARAM_DTYPE
)
)
else:
expected_dtype = _CURRENT_FULL_PRECISION_PARAM_DTYPE
for t in tensors:
self.assertEqual(
expected_dtype,
t.dtype,
f"Expected to reduce in {expected_dtype} but got tensors in {t.dtype}",
)
return orig_reduce_scatter(*args, **kwargs)
def _test_grads_reduced_precision(
self, offload_params: bool, use_orig_params: bool
):
class MyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(10, 10)
self.lin2 = nn.Linear(10, 10)
def forward(self, x):
return self.lin2(self.lin1(x))
m = MyModel().cuda()
mp = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
keep_low_precision_grads=True,
)
fsdp_kwargs = {
"mixed_precision": mp,
"cpu_offload": CPUOffload(offload_params=offload_params),
"use_orig_params": use_orig_params,
}
m.lin1 = FSDP(m.lin1, **fsdp_kwargs)
m = FSDP(m, **fsdp_kwargs)
for _ in range(6):
inp = torch.ones(1, 10)
m(inp).sum().backward()
for param in m.parameters():
if param.grad is not None:
self.assertEqual(torch.float16, param.grad.dtype)
dist.barrier()
def _run_test_mixed_precision_e2e(
self,
mp_config,
cpu_offload,
backward_prefetch,
forward_prefetch,
full_precision_param_dtype,
sharding_strategy,
enable_sharded_grad_scaler,
):
torch.cuda.set_device(self.rank)
fsdp_models = [
self._get_simple_model(
param_dtype=full_precision_param_dtype,
sharding_strategy=sharding_strategy,
cpu_offload=cpu_offload,
mixed_precision=mp_config,
backward_prefetch=backward_prefetch,
forward_prefetch=forward_prefetch,
),
self._get_simple_nested_model(
param_dtype=full_precision_param_dtype,
run_checks=True,
sharding_strategy=sharding_strategy,
cpu_offload=cpu_offload,
mixed_precision=mp_config,
backward_prefetch=backward_prefetch,
forward_prefetch=forward_prefetch,
),
]
for model in fsdp_models:
if not cpu_offload.offload_params:
model.cuda()
# Patch reduce_scatter to add validation for mixed precision types.
orig_reduce_scatter = dist.reduce_scatter_tensor
test_reduce_scatter = partial(
self._reduce_scatter_validate_mp,
orig_reduce_scatter,
mp_config,
True,
)
with patch_reduce_scatter(test_reduce_scatter, full_precision_param_dtype):
scaler = ShardedGradScaler(enabled=enable_sharded_grad_scaler)
optim = torch.optim.Adam(model.parameters())
for _ in range(3):
inp = torch.randn(
3, 10, device="cuda", dtype=full_precision_param_dtype
)
# Forward pass of LinearMixedPrecision check casting of
# inputs, params, buffers.
act, *_ = model(
(inp, self, model, mp_config, full_precision_param_dtype)
)
# Buffers should be casted.
for buf in model.buffers():
if mp_config.buffer_dtype is not None:
self.assertEqual(buf.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(buf.dtype, _BUFFER_ORIG_DTYPE)
# p._mp_shard should be freed.
if mp_config.param_dtype is not None:
self._validate_mp_shard_freed(model)
else:
# We never should have allocated an _mp_shard.
self._validate_no_mp_shard(model)
loss = act.sum()
loss = scaler.scale(loss)
if mp_config.param_dtype is not None:
self.assertEqual(loss.dtype, mp_config.param_dtype)
else:
self.assertEqual(loss.dtype, full_precision_param_dtype)
# Will run patched reduce scatter that validates mixed_precision
# types in backward.
loss.backward()
# Buffers stay casted even after backwards.
for buf in model.buffers():
if mp_config.buffer_dtype is not None:
self.assertEqual(buf.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(buf.dtype, _BUFFER_ORIG_DTYPE)
# p._mp_shard should be freed.
if mp_config.param_dtype is not None:
self._validate_mp_shard_freed(model)
else:
self._validate_no_mp_shard(model)
# Ensure params and grads are in full precision,
# as after fwd/backward we maintain full precision shards.
for param in model.parameters():
self.assertEqual(param.dtype, full_precision_param_dtype)
if param.grad is not None:
self.assertEqual(
param.grad.dtype, full_precision_param_dtype
)
# Unscale the gradients and step
scaler.step(optim)
# Update the scale factor
scaler.update()
# Summon full params should be in full precision
with model.summon_full_params(model):
# It is not expected for summon_full_params to allocate
# a mixed precision shard.
if mp_config.param_dtype is not None:
self._validate_mp_shard_freed(model)
else:
self._validate_no_mp_shard(model)
params = list(model.parameters())
for p in params:
self.assertEqual(p.dtype, full_precision_param_dtype)
# Note that buffers are cast only once and only restored
# to the original buffer dtype in state_dict, so
# summon_full_params is not expected to restore buffer
# types to their original.
named_buffers = dict(model.named_buffers())
for v in named_buffers.values():
if mp_config.buffer_dtype is not None:
self.assertEqual(v.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(v.dtype, _BUFFER_ORIG_DTYPE)
# state_dict should be in full precision
state_dict = {k: v.clone() for k, v in model.state_dict().items()}
for name, tensor in state_dict.items():
# Parameters and buffers are checkpointed in their
# original dtypes, which may be different.
if name in named_buffers:
self.assertEqual(tensor.dtype, _BUFFER_ORIG_DTYPE)
else:
self.assertEqual(
tensor.dtype,
full_precision_param_dtype,
f"{name}: {tensor.dtype} vs {full_precision_param_dtype}",
)
# After state_dict, buffer's dtype should have been restored
# to the mixed precision one.
for buf in model.buffers():
if mp_config.buffer_dtype is not None:
self.assertEqual(buf.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(buf.dtype, _BUFFER_ORIG_DTYPE)
| TestFSDPMixedPrecision |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super2.py | {
"start": 964,
"end": 1011
} | class ____:
def __init__(self) -> None: ...
| C |
python | getsentry__sentry | src/sentry/integrations/slack/threads/activity_notifications.py | {
"start": 6364,
"end": 7724
} | class ____(GroupActivityNotification):
metrics_key = "create_issue"
title = "External Issue Created"
def get_description(self) -> tuple[str, str | None, Mapping[str, Any]]:
external_issue = _external_issue_activity_factory(activity=self.activity)
provider = external_issue.get_provider()
# Use proper grammar, so use "an" if it's "external provider" and "a" if it's a regular name
if provider == external_issue.DEFAULT_PROVIDER_FALLBACK_TEXT:
base_template = "an "
else:
base_template = "a "
provider = external_issue.get_formatted_provider_name()
base_template += "{provider} issue"
ticket_number = external_issue.get_ticket_number()
if ticket_number:
base_template += " {ticket}"
link = external_issue.get_link()
if link:
base_template = "<{link}|" + base_template + ">"
# Template should look something like "{author} created <{link}| a/an {provider} issue {ticket}>"
if self.activity.data.get("new", True):
base_template = "{author} created " + base_template
else:
base_template = "{author} linked " + base_template
return base_template, None, {"provider": provider, "ticket": ticket_number, "link": link}
| ExternalIssueCreatedActivityNotification |
python | getsentry__sentry | src/sentry/audit_log/services/log/impl.py | {
"start": 683,
"end": 4037
} | class ____(LogService):
event_id_skip_list_option = "hybrid_cloud.audit_log_event_id_invalid_pass_list"
def record_audit_log(self, *, event: AuditLogEvent) -> None:
entry = AuditLogEntry.from_event(event)
try:
with enforce_constraints(transaction.atomic(router.db_for_write(AuditLogEntry))):
entry.save()
except Exception as e:
if isinstance(e, IntegrityError):
error_message = str(e)
if '"auth_user"' in error_message:
# It is possible that a user existed at the time of serialization but was deleted by the time of consumption
# in which case we follow the database's SET NULL on delete handling.
if event.actor_user_id:
event.actor_user_id = None
if event.target_user_id:
event.target_user_id = None
return self.record_audit_log(event=event)
# Relief hatch for audit logs with known bad states. This allows us
# to clear backlogged outboxes with invalid data.
if not self._should_skip_invalid_event(event):
raise
def record_user_ip(self, *, event: UserIpEvent) -> None:
UserIP.objects.create_or_update(
user_id=event.user_id,
ip_address=event.ip_address,
values=dict(
last_seen=event.last_seen,
country_code=event.country_code,
region_code=event.region_code,
),
)
with unguarded_write(router.db_for_write(User)):
# It greatly simplifies testing not to be too aggressive on updating the last_active due to many
# comparisons with serializers.
User.objects.filter(
id=event.user_id,
last_active__lt=(event.last_seen - datetime.timedelta(minutes=1)),
).update(last_active=event.last_seen)
def find_last_log(
self,
*,
organization_id: int,
target_object_id: int | None,
event: int,
data: dict[str, str] | None = None,
) -> AuditLogEvent | None:
last_entry_q = AuditLogEntry.objects.filter(
organization_id=organization_id,
target_object=target_object_id,
event=event,
)
if data:
last_entry_q = last_entry_q.filter(data=data)
last_entry: AuditLogEntry | None = last_entry_q.last()
if last_entry is None:
return None
return last_entry.as_event()
def _should_skip_invalid_event(self, event: AuditLogEvent) -> bool:
event_id_pass_list = self._get_invalid_event_id_pass_list()
return event.event_id in event_id_pass_list
def _get_invalid_event_id_pass_list(self) -> list[int]:
pass_list = options.get(self.event_id_skip_list_option)
list_valid = isinstance(pass_list, list)
if list_valid:
for item in pass_list:
if not isinstance(item, int):
list_valid = False
break
if not list_valid:
logger.error("audit_log.invalid_audit_log_pass_list", extra={"pass_list": pass_list})
return []
return pass_list
| DatabaseBackedLogService |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 1990,
"end": 2134
} | class ____(ParentBad[int]): # [inherit-non-class]
pass
# Classes that don't implement '__class_getitem__' are marked as unsubscriptable
| Child2 |
python | Netflix__metaflow | metaflow/datastore/datastore_set.py | {
"start": 2317,
"end": 2614
} | class ____(BlobCache):
def __init__(self, preloaded):
self._preloaded = preloaded
def load_key(self, key):
return self._preloaded.get(key)
def store_key(self, key, blob):
# we cache only preloaded keys, so no need to store anything
pass
| ImmutableBlobCache |
python | openai__openai-python | src/openai/types/responses/response_code_interpreter_tool_call_param.py | {
"start": 331,
"end": 538
} | class ____(TypedDict, total=False):
logs: Required[str]
"""The logs output from the code interpreter."""
type: Required[Literal["logs"]]
"""The type of the output. Always `logs`."""
| OutputLogs |
python | ray-project__ray | python/ray/tests/test_tls_auth.py | {
"start": 2438,
"end": 3943
} | class ____:
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_returns=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
""",
env=build_env(),
)
@pytest.mark.skipif(
sys.platform == "darwin",
reason=("Cryptography (TLS dependency) doesn't install in Mac build pipeline"),
)
@pytest.mark.parametrize("use_tls", [True], indirect=True)
def test_client_connect_to_tls_server(use_tls, call_ray_start):
tls_env = build_env() # use_tls fixture sets TLS environment variables
without_tls_env = {k: v for k, v in tls_env.items() if "TLS" not in k}
# Attempt to connect without TLS
with pytest.raises(subprocess.CalledProcessError) as exc_info:
run_string_as_driver(
"""
from ray.util.client import ray as ray_client
ray_client.connect("localhost:10001")
""",
env=without_tls_env,
)
assert "ConnectionError" in exc_info.value.output.decode("utf-8")
# Attempt to connect with TLS
run_string_as_driver(
"""
import ray
from ray.util.client import ray as ray_client
ray_client.connect("localhost:10001")
assert ray.is_initialized()
""",
env=tls_env,
)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| Actor |
python | pytorch__pytorch | torch/utils/tensorboard/_pytorch_graph.py | {
"start": 1650,
"end": 2528
} | class ____(NodeBase):
def __init__(self, node_cpp, valid_methods) -> None:
super().__init__(node_cpp)
valid_methods = valid_methods[:]
self.inputs = []
for m in valid_methods:
if m == "inputs" or m == "outputs":
list_of_node = list(getattr(node_cpp, m)())
io_unique_names = []
io_tensor_sizes = []
for n in list_of_node:
io_unique_names.append(n.debugName())
if n.isCompleteTensor():
io_tensor_sizes.append(n.type().sizes())
else:
io_tensor_sizes.append(None)
setattr(self, m, io_unique_names)
setattr(self, m + "tensor_size", io_tensor_sizes)
else:
setattr(self, m, getattr(node_cpp, m)())
| NodePy |
python | pdm-project__pdm | src/pdm/models/backends.py | {
"start": 2374,
"end": 2853
} | class ____:
def __init__(self, expand: bool = True) -> None:
self.expand = expand
def __format__(self, __format_spec: str) -> str:
name, sep, default = __format_spec.partition(":")
if not self.expand:
return f"${{{name}}}"
if name in os.environ:
return os.environ[name]
if not sep:
raise ValueError(f"Nonexistent environment variable must set a default: {name}")
return default
| EnvContext |
python | mlflow__mlflow | mlflow/utils/request_utils.py | {
"start": 967,
"end": 10113
} | class ____(Retry):
"""
urllib3 < 2 doesn't support `backoff_jitter`. This class is a workaround for that.
"""
def __init__(self, *args, backoff_jitter=0.0, **kwargs):
super().__init__(*args, **kwargs)
self.backoff_jitter = backoff_jitter
def get_backoff_time(self):
"""
Source: https://github.com/urllib3/urllib3/commit/214b184923388328919b0a4b0c15bff603aa51be
"""
backoff_value = super().get_backoff_time()
if self.backoff_jitter != 0.0:
backoff_value += random.random() * self.backoff_jitter
# The attribute `BACKOFF_MAX` was renamed to `DEFAULT_BACKOFF_MAX` in this commit:
# https://github.com/urllib3/urllib3/commit/f69b1c89f885a74429cabdee2673e030b35979f0
# which was part of the major release of 2.0 for urllib3 and the support for both
# constants was added in 1.26.9:
# https://github.com/urllib3/urllib3/blob/1.26.9/src/urllib3/util/retry.py
default_backoff = (
Retry.BACKOFF_MAX
if Version(urllib3.__version__) < Version("1.26.9")
else Retry.DEFAULT_BACKOFF_MAX
)
return float(max(0, min(default_backoff, backoff_value)))
def augmented_raise_for_status(response):
"""Wrap the standard `requests.response.raise_for_status()` method and return reason"""
try:
response.raise_for_status()
except HTTPError as e:
if response.text:
raise HTTPError(
f"{e}. Response text: {response.text}", request=e.request, response=e.response
)
else:
raise e
def download_chunk(*, range_start, range_end, headers, download_path, http_uri):
combined_headers = {**headers, "Range": f"bytes={range_start}-{range_end}"}
with cloud_storage_http_request(
"get",
http_uri,
stream=False,
headers=combined_headers,
timeout=10,
) as response:
expected_length = response.headers.get("Content-Length")
if expected_length is not None:
actual_length = response.raw.tell()
expected_length = int(expected_length)
if actual_length < expected_length:
raise IOError(
"Incomplete read ({} bytes read, {} more expected)".format(
actual_length, expected_length - actual_length
)
)
# File will have been created upstream. Use r+b to ensure chunks
# don't overwrite the entire file.
augmented_raise_for_status(response)
with open(download_path, "r+b") as f:
f.seek(range_start)
f.write(response.content)
@lru_cache(maxsize=64)
def _cached_get_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
# To create a new Session object for each process, we use the process id as the cache key.
# This is to avoid sharing the same Session object across processes, which can lead to issues
# such as https://stackoverflow.com/q/3724900.
_pid,
respect_retry_after_header=True,
):
"""
This function should not be called directly. Instead, use `_get_request_session` below.
"""
retry_kwargs = {
"total": max_retries,
"connect": max_retries,
"read": max_retries,
"redirect": max_retries,
"status": max_retries,
"status_forcelist": retry_codes,
"backoff_factor": backoff_factor,
"backoff_jitter": backoff_jitter,
"raise_on_status": raise_on_status,
"respect_retry_after_header": respect_retry_after_header,
}
urllib3_version = Version(urllib3.__version__)
if urllib3_version >= Version("1.26.0"):
retry_kwargs["allowed_methods"] = None
else:
retry_kwargs["method_whitelist"] = None
if urllib3_version < Version("2.0"):
retry = JitteredRetry(**retry_kwargs)
else:
retry = Retry(**retry_kwargs)
from mlflow.environment_variables import (
MLFLOW_HTTP_POOL_CONNECTIONS,
MLFLOW_HTTP_POOL_MAXSIZE,
)
adapter = HTTPAdapter(
pool_connections=MLFLOW_HTTP_POOL_CONNECTIONS.get(),
pool_maxsize=MLFLOW_HTTP_POOL_MAXSIZE.get(),
max_retries=retry,
)
session = requests.Session()
session.mount("https://", adapter)
session.mount("http://", adapter)
return session
def _get_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
respect_retry_after_header,
):
"""Returns a `Requests.Session` object for making an HTTP request.
Args:
max_retries: Maximum total number of retries.
backoff_factor: A time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
backoff_jitter: A random jitter to add to the backoff interval.
retry_codes: A list of HTTP response error codes that qualifies for retry.
raise_on_status: Whether to raise an exception, or return a response, if status falls
in retry_codes range and retries have been exhausted.
respect_retry_after_header: Whether to respect Retry-After header on status codes defined
as Retry.RETRY_AFTER_STATUS_CODES or not.
Returns:
requests.Session object.
"""
return _cached_get_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
_pid=os.getpid(),
respect_retry_after_header=respect_retry_after_header,
)
def _get_http_response_with_retries(
method,
url,
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status=True,
allow_redirects=None,
respect_retry_after_header=True,
**kwargs,
):
"""Performs an HTTP request using Python's `requests` module with an automatic retry policy.
Args:
method: A string indicating the method to use, e.g. "GET", "POST", "PUT".
url: The target URL address for the HTTP request.
max_retries: Maximum total number of retries.
backoff_factor: A time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
backoff_jitter: A random jitter to add to the backoff interval.
retry_codes: A list of HTTP response error codes that qualifies for retry.
raise_on_status: Whether to raise an exception, or return a response, if status falls
in retry_codes range and retries have been exhausted.
kwargs: Additional keyword arguments to pass to `requests.Session.request()`
Returns:
requests.Response object.
"""
session = _get_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
respect_retry_after_header,
)
# the environment variable is hardcoded here to avoid importing mlflow.
# however, documentation is available in environment_variables.py
env_value = os.getenv("MLFLOW_ALLOW_HTTP_REDIRECTS", "true").lower() in ["true", "1"]
allow_redirects = env_value if allow_redirects is None else allow_redirects
return session.request(method, url, allow_redirects=allow_redirects, **kwargs)
def cloud_storage_http_request(
method,
url,
max_retries=5,
backoff_factor=2,
backoff_jitter=1.0,
retry_codes=_TRANSIENT_FAILURE_RESPONSE_CODES,
timeout=None,
**kwargs,
):
"""Performs an HTTP PUT/GET/PATCH request using Python's `requests` module with automatic retry.
Args:
method: string of 'PUT' or 'GET' or 'PATCH', specify to do http PUT or GET or PATCH.
url: the target URL address for the HTTP request.
max_retries: maximum number of retries before throwing an exception.
backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
backoff_jitter: A random jitter to add to the backoff interval.
retry_codes: a list of HTTP response error codes that qualifies for retry.
timeout: wait for timeout seconds for response from remote server for connect and
read request. Default to None owing to long duration operation in read / write.
kwargs: Additional keyword arguments to pass to `requests.Session.request()`.
Returns:
requests.Response object.
"""
if method.lower() not in ("put", "get", "patch", "delete"):
raise ValueError("Illegal http method: " + method)
return _get_http_response_with_retries(
method,
url,
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
timeout=timeout,
**kwargs,
)
| JitteredRetry |
python | huggingface__transformers | src/transformers/models/canine/modeling_canine.py | {
"start": 29798,
"end": 30578
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = CaninePredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor:
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
| CanineLMPredictionHead |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/webhook.py | {
"start": 2113,
"end": 2218
} | class ____(Exception):
"""Signature value does not match the expected format"""
| MalformedSignatureError |
python | doocs__leetcode | solution/3700-3799/3747.Count Distinct Integers After Removing Zeros/Solution.py | {
"start": 0,
"end": 615
} | class ____:
def countDistinct(self, n: int) -> int:
@cache
def dfs(i: int, zero: bool, lead: bool, lim: bool) -> int:
if i >= len(s):
return 1 if (not zero and not lead) else 0
up = int(s[i]) if lim else 9
ans = 0
for j in range(up + 1):
nxt_zero = zero or (j == 0 and not lead)
nxt_lead = lead and j == 0
nxt_lim = lim and j == up
ans += dfs(i + 1, nxt_zero, nxt_lead, nxt_lim)
return ans
s = str(n)
return dfs(0, False, True, True)
| Solution |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 552318,
"end": 558254
} | class ____(ExprNode):
# Short-circuiting conditional expression.
#
# test ExprNode
# true_val ExprNode
# false_val ExprNode
true_val = None
false_val = None
is_temp = True
subexprs = ['test', 'true_val', 'false_val']
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(
self.true_val.infer_type(env),
self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.constant_result = self.true_val.constant_result
else:
self.constant_result = self.false_val.constant_result
def is_ephemeral(self):
return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
def analyse_types(self, env):
self.test = self.test.analyse_temp_boolean_expression(env)
self.true_val = self.true_val.analyse_types(env)
self.false_val = self.false_val.analyse_types(env)
return self.analyse_result_type(env)
def analyse_result_type(self, env):
true_val_type = self.true_val.type
false_val_type = self.false_val.type
self.type = PyrexTypes.independent_spanning_type(true_val_type, false_val_type)
if self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
if self.type.is_pyobject:
self.result_ctype = py_object_type
elif self.type.is_ptr:
if self.true_val.is_ephemeral():
error(self.true_val.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
if self.false_val.is_ephemeral():
error(self.false_val.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
if true_val_type.is_pyobject or false_val_type.is_pyobject or self.type.is_pyobject:
if true_val_type != self.type:
self.true_val = self.true_val.coerce_to(self.type, env)
if false_val_type != self.type:
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type.is_error:
self.type_error()
return self
def coerce_to_index(self, env):
if not self.true_val.type.is_int:
self.true_val = self.true_val.coerce_to_index(env)
if not self.false_val.type.is_int:
self.false_val = self.false_val.coerce_to_index(env)
self.result_ctype = None
out = self.analyse_result_type(env)
if not out.type.is_int:
# fall back to ordinary coercion since we haven't ended as the correct type
if out is self:
out = super(CondExprNode, out).coerce_to_index(env)
else:
# I believe `analyse_result_type` always returns a CondExprNode but
# handle the opposite case just in case
out = out.coerce_to_index(env)
return out
def coerce_to(self, dst_type, env):
if self.true_val.type != dst_type:
self.true_val = self.true_val.coerce_to(dst_type, env)
if self.false_val.type != dst_type:
self.false_val = self.false_val.coerce_to(dst_type, env)
self.result_ctype = None
out = self.analyse_result_type(env)
if out.type != dst_type:
# fall back to ordinary coercion since we haven't ended as the correct type
if out is self:
out = super(CondExprNode, out).coerce_to(dst_type, env)
else:
# I believe `analyse_result_type` always returns a CondExprNode but
# handle the opposite case just in case
out = out.coerce_to(dst_type, env)
return out
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
def check_const(self):
return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
code.putln("if (%s) {" % self.test.result())
self.eval_and_get(code, self.true_val)
code.putln("} else {")
self.eval_and_get(code, self.false_val)
code.putln("}")
self.test.generate_disposal_code(code)
self.test.free_temps(code)
def eval_and_get(self, code, expr):
expr.generate_evaluation_code(code)
if self.type.is_memoryviewslice:
expr.make_owned_memoryviewslice(code)
else:
expr.make_owned_reference(code)
code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
expr.generate_post_assignment_code(code)
expr.free_temps(code)
def generate_subexpr_disposal_code(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
def free_subexpr_temps(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
richcmp_constants = {
"<" : "Py_LT",
"<=": "Py_LE",
"==": "Py_EQ",
"!=": "Py_NE",
"<>": "Py_NE",
">" : "Py_GT",
">=": "Py_GE",
# the following are faked by special compare functions
"in" : "Py_EQ",
"not_in": "Py_NE",
}
| CondExprNode |
python | huggingface__transformers | tests/models/rag/test_tokenization_rag.py | {
"start": 1532,
"end": 7310
} | class ____(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> RobertaTokenizer:
return RobertaTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def test_save_load_pretrained_with_saved_config(self):
save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
rag_config.save_pretrained(save_dir)
rag_tokenizer.save_pretrained(save_dir)
new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizer)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator, RobertaTokenizer)
self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
def test_pretrained_token_nq_tokenizer(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings)
self.assertIsNotNone(input_dict)
@slow
def test_pretrained_sequence_nq_tokenizer(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings)
self.assertIsNotNone(input_dict)
| RagTokenizerTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/contrib/regular_languages/regex_parser.py | {
"start": 869,
"end": 1357
} | class ____(Node):
"""
Union operation (OR operation) between several grammars. You don't
initialize this yourself, but it's a result of a "Grammar1 | Grammar2"
operation.
"""
def __init__(self, children: list[Node]) -> None:
self.children = children
def __or__(self, other_node: Node) -> AnyNode:
return AnyNode(self.children + [other_node])
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.children!r})"
| AnyNode |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 155580,
"end": 157811
} | class ____:
"""A wrapper for a dynamic library."""
def __init__(
self,
lib_path: str,
) -> None:
self.lib_path = lib_path
self.is_open = False
self.DLL = cdll.LoadLibrary(lib_path)
self.is_open = True
def close(self) -> None:
if self.is_open:
self._dlclose()
self.is_open = False
def _dlclose(self) -> None:
f_dlclose = None
if is_linux():
syms = CDLL(None)
if not hasattr(syms, "dlclose"):
# Apline Linux
syms = CDLL("libc.so")
if hasattr(syms, "dlclose"):
f_dlclose = syms.dlclose
elif is_windows():
import ctypes
kernel32 = ctypes.CDLL("kernel32", use_last_error=True)
f_dlclose = kernel32.FreeLibrary
else:
raise NotImplementedError("Unsupported env, failed to do dlclose!")
if f_dlclose is not None:
if is_linux():
f_dlclose.argtypes = [c_void_p]
f_dlclose(self.DLL._handle)
elif is_windows():
import ctypes
from ctypes import wintypes
f_dlclose.argtypes = [wintypes.HMODULE]
f_dlclose(self.DLL._handle)
else:
log.warning(
"dll unloading function was not found, library may not be unloaded properly!"
)
def __getattr__(self, name: str) -> Callable[..., None]:
if not self.is_open:
raise RuntimeError(f"Cannot use closed DLL library: {self.lib_path}")
method = getattr(self.DLL, name)
def _wrapped_func(*args: Any) -> None:
err = method(*args)
if err:
raise RuntimeError(f"Error in function: {method.__name__}")
return _wrapped_func
def __enter__(self) -> Self:
return self
def __exit__(self, *args: Any) -> None:
self.close()
def __del__(self) -> None:
self.close()
@lru_cache
def binary_error_path(output_path: str) -> str:
"""
standard format for the error path
"""
return output_path + ".error"
@clear_on_fresh_cache
| DLLWrapper |
python | apache__airflow | airflow-core/tests/unit/plugins/priority_weight_strategy.py | {
"start": 1670,
"end": 2194
} | class ____(AirflowPlugin):
# Without this import, the qualname method will not use the correct classes names
from unit.plugins.priority_weight_strategy import (
DecreasingPriorityStrategy,
FactorPriorityWeightStrategy,
StaticTestPriorityWeightStrategy,
)
name = "priority_weight_strategy_plugin"
priority_weight_strategies = [
StaticTestPriorityWeightStrategy,
FactorPriorityWeightStrategy,
DecreasingPriorityStrategy,
]
| TestPriorityWeightStrategyPlugin |
python | sqlalchemy__sqlalchemy | test/ext/test_deprecations.py | {
"start": 352,
"end": 1239
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
FixtureTest.define_tables(metadata)
def test_reflect_true(self):
Base = automap_base(metadata=self.tables_test_metadata)
engine_mock = mock.Mock()
with mock.patch.object(Base.metadata, "reflect") as reflect_mock:
with testing.expect_deprecated(
"The AutomapBase.prepare.reflect parameter is deprecated",
"The AutomapBase.prepare.engine parameter is deprecated",
):
Base.prepare(
engine=engine_mock, reflect=True, schema="some_schema"
)
reflect_mock.assert_called_once_with(
engine_mock,
schema="some_schema",
extend_existing=True,
autoload_replace=False,
)
| AutomapTest |
python | pandas-dev__pandas | pandas/tests/reshape/merge/test_merge_ordered.py | {
"start": 362,
"end": 7561
} | class ____:
def test_basic(self, left, right):
result = merge_ordered(left, right, on="key")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1, np.nan, 2, np.nan, 3, np.nan],
"rvalue": [np.nan, 1, 2, 3, np.nan, 4],
}
)
tm.assert_frame_equal(result, expected)
def test_ffill(self, left, right):
result = merge_ordered(left, right, on="key", fill_method="ffill")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1.0, 1, 2, 2, 3, 3.0],
"rvalue": [np.nan, 1, 2, 3, 3, 4],
}
)
tm.assert_frame_equal(result, expected)
def test_multigroup(self, left, right):
left = pd.concat([left, left], ignore_index=True)
left["group"] = ["a"] * 3 + ["b"] * 3
result = merge_ordered(
left, right, on="key", left_by="group", fill_method="ffill"
)
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"] * 2,
"lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2,
"rvalue": [np.nan, 1, 2, 3, 3, 4] * 2,
}
)
expected["group"] = ["a"] * 6 + ["b"] * 6
tm.assert_frame_equal(result, expected.loc[:, result.columns])
result2 = merge_ordered(
right, left, on="key", right_by="group", fill_method="ffill"
)
tm.assert_frame_equal(result, result2.loc[:, result.columns])
result = merge_ordered(left, right, on="key", left_by="group")
assert result["group"].notna().all()
@pytest.mark.filterwarnings(
"ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"
)
def test_merge_type(self, left, right):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(left)
result = nad.merge(right, on="key")
assert isinstance(result, NotADataFrame)
@pytest.mark.parametrize(
"df_seq, pattern",
[
((), "[Nn]o objects"),
([], "[Nn]o objects"),
({}, "[Nn]o objects"),
([None], "objects.*None"),
([None, None], "objects.*None"),
],
)
def test_empty_sequence_concat(self, df_seq, pattern):
# GH 9157
with pytest.raises(ValueError, match=pattern):
pd.concat(df_seq)
@pytest.mark.parametrize(
"arg", [[DataFrame()], [None, DataFrame()], [DataFrame(), None]]
)
def test_empty_sequence_concat_ok(self, arg):
pd.concat(arg)
def test_doc_example(self):
left = DataFrame(
{
"group": list("aaabbb"),
"key": ["a", "c", "e", "a", "c", "e"],
"lvalue": [1, 2, 3] * 2,
}
)
right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
result = merge_ordered(left, right, fill_method="ffill", left_by="group")
expected = DataFrame(
{
"group": list("aaaaabbbbb"),
"key": ["a", "b", "c", "d", "e"] * 2,
"lvalue": [1, 1, 2, 2, 3] * 2,
"rvalue": [np.nan, 1, 2, 3, 3] * 2,
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left, right, on, left_by, right_by, expected",
[
(
{"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]},
{"T": [2], "E": [1]},
["T"],
["G", "H"],
None,
{
"G": ["g"] * 3,
"H": ["h"] * 3,
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
},
),
(
{"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]},
{"T": [2], "E": [1]},
"T",
["G", "H"],
None,
{
"G": ["g"] * 3,
"H": ["h"] * 3,
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
},
),
(
{"T": [2], "E": [1]},
{"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]},
["T"],
None,
["G", "H"],
{
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
"G": ["g"] * 3,
"H": ["h"] * 3,
},
),
],
)
def test_list_type_by(self, left, right, on, left_by, right_by, expected):
# GH 35269
left = DataFrame(left)
right = DataFrame(right)
result = merge_ordered(
left=left,
right=right,
on=on,
left_by=left_by,
right_by=right_by,
)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_left_by_length_equals_to_right_shape0(self):
# GH 38166
left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE"))
right = DataFrame([[2, 1]], columns=list("ET"))
result = merge_ordered(left, right, on="E", left_by=["G", "H"])
expected = DataFrame(
{"G": ["g"] * 3, "H": ["h"] * 3, "E": [1, 2, 3], "T": [np.nan, 1.0, np.nan]}
)
tm.assert_frame_equal(result, expected)
def test_elements_not_in_by_but_in_df(self):
# GH 38167
left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE"))
right = DataFrame([[2, 1]], columns=list("ET"))
msg = r"\{'h'\} not found in left columns"
with pytest.raises(KeyError, match=msg):
merge_ordered(left, right, on="E", left_by=["G", "h"])
@pytest.mark.parametrize("invalid_method", ["linear", "carrot"])
def test_ffill_validate_fill_method(self, left, right, invalid_method):
# GH 55884
with pytest.raises(
ValueError, match=re.escape("fill_method must be 'ffill' or None")
):
merge_ordered(left, right, on="key", fill_method=invalid_method)
def test_ffill_left_merge(self):
# GH 57010
df1 = DataFrame(
{
"key": ["a", "c", "e", "a", "c", "e"],
"lvalue": [1, 2, 3, 1, 2, 3],
"group": ["a", "a", "a", "b", "b", "b"],
}
)
df2 = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
result = merge_ordered(
df1, df2, fill_method="ffill", left_by="group", how="left"
)
expected = DataFrame(
{
"key": ["a", "c", "e", "a", "c", "e"],
"lvalue": [1, 2, 3, 1, 2, 3],
"group": ["a", "a", "a", "b", "b", "b"],
"rvalue": [np.nan, 2.0, 2.0, np.nan, 2.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
| TestMergeOrdered |
python | huggingface__transformers | src/transformers/models/biogpt/configuration_biogpt.py | {
"start": 811,
"end": 6215
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BioGptModel`]. It is used to instantiate an
BioGPT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the BioGPT
[microsoft/biogpt](https://huggingface.co/microsoft/biogpt) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 42384):
Vocabulary size of the BioGPT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BioGptModel`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by diving by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
layerdrop (`float`, *optional*, defaults to 0.0):
Please refer to the paper about LayerDrop: https://huggingface.co/papers/1909.11556 for further details
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
Example:
```python
>>> from transformers import BioGptModel, BioGptConfig
>>> # Initializing a BioGPT microsoft/biogpt style configuration
>>> configuration = BioGptConfig()
>>> # Initializing a model from the microsoft/biogpt style configuration
>>> model = BioGptModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "biogpt"
def __init__(
self,
vocab_size=42384,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=1024,
initializer_range=0.02,
layer_norm_eps=1e-12,
scale_embedding=True,
use_cache=True,
layerdrop=0.0,
activation_dropout=0.0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.scale_embedding = scale_embedding
self.use_cache = use_cache
self.layerdrop = layerdrop
self.activation_dropout = activation_dropout
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
__all__ = ["BioGptConfig"]
| BioGptConfig |
python | getsentry__sentry | src/sentry/api/authentication.py | {
"start": 6829,
"end": 7735
} | class ____(QuietBasicAuthentication):
token_name: ClassVar[bytes]
def accepts_auth(self, auth: list[bytes]) -> bool:
return bool(auth) and auth[0].lower() == self.token_name
def authenticate_token(self, request: Request, token_str: str) -> tuple[Any, Any]:
raise NotImplementedError
def authenticate(self, request: Request):
auth = get_authorization_header(request).split()
if not self.accepts_auth(auth):
return None
if len(auth) == 1:
msg = "Invalid token header. No credentials provided."
raise AuthenticationFailed(msg)
elif len(auth) > 2:
msg = "Invalid token header. Token string should not contain spaces."
raise AuthenticationFailed(msg)
return self.authenticate_token(request, force_str(auth[1]))
@AuthenticationSiloLimit(SiloMode.REGION)
| StandardAuthentication |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/dependency.py | {
"start": 28595,
"end": 31243
} | class ____(
NamedTuple("_DynamicCollectDependencyDefinition", [("node_name", str), ("output_name", str)]),
IDependencyDefinition,
):
def get_node_dependencies(self) -> Sequence[DependencyDefinition]:
return [DependencyDefinition(self.node_name, self.output_name)]
def is_fan_in(self) -> bool:
return True
DepTypeAndOutputs: TypeAlias = tuple[
DependencyType,
Union[NodeOutput, list[Union[NodeOutput, type["MappedInputPlaceholder"]]]],
]
InputToOutputMap: TypeAlias = dict[NodeInput, DepTypeAndOutputs]
def _create_handle_dict(
node_dict: Mapping[str, Node],
dep_dict: DependencyMapping[str],
) -> InputToOutputMap:
from dagster._core.definitions.composition import MappedInputPlaceholder
check.mapping_param(node_dict, "node_dict", key_type=str, value_type=Node)
check.two_dim_mapping_param(dep_dict, "dep_dict", value_type=IDependencyDefinition)
handle_dict: InputToOutputMap = {}
for node_name, input_dict in dep_dict.items():
from_node = node_dict[node_name]
for input_name, dep_def in input_dict.items():
if isinstance(
dep_def, (MultiDependencyDefinition, BlockingAssetChecksDependencyDefinition)
):
handles: list[Union[NodeOutput, type[MappedInputPlaceholder]]] = []
for inner_dep in dep_def.get_dependencies_and_mappings():
if isinstance(inner_dep, DependencyDefinition):
handles.append(node_dict[inner_dep.node].get_output(inner_dep.output))
elif inner_dep is MappedInputPlaceholder:
handles.append(inner_dep)
else:
check.failed(
f"Unexpected MultiDependencyDefinition dependencies type {inner_dep}"
)
handle_dict[from_node.get_input(input_name)] = (DependencyType.FAN_IN, handles)
elif isinstance(dep_def, DependencyDefinition):
handle_dict[from_node.get_input(input_name)] = (
DependencyType.DIRECT,
node_dict[dep_def.node].get_output(dep_def.output),
)
elif isinstance(dep_def, DynamicCollectDependencyDefinition):
handle_dict[from_node.get_input(input_name)] = (
DependencyType.DYNAMIC_COLLECT,
node_dict[dep_def.node_name].get_output(dep_def.output_name),
)
else:
check.failed(f"Unknown dependency type {dep_def}")
return handle_dict
| DynamicCollectDependencyDefinition |
python | pola-rs__polars | py-polars/src/polars/_typing.py | {
"start": 1135,
"end": 1374
} | class ____(Protocol):
"""Type protocol for Arrow C Data Interface via Arrow PyCapsule Interface."""
def __arrow_c_array__(
self, requested_schema: object | None = None
) -> tuple[object, object]: ...
| ArrowArrayExportable |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_operators_test.py | {
"start": 1025,
"end": 4857
} | class ____(test_util.TensorFlowTestCase):
def testEqualityOperators(self):
a = ragged_factory_ops.constant([[1, 2], [3]])
b = ragged_factory_ops.constant([[4, 5], [3]])
c = 2
if tf2.enabled() and ops.executing_eagerly_outside_functions():
# Value-based equality:
self.assertAllEqual(a == b, [[False, False], [True]])
self.assertAllEqual(a != b, [[True, True], [False]])
# Value-based equality (w/ broadcasting):
self.assertAllEqual(a == c, [[False, True], [False]])
self.assertAllEqual(a != c, [[True, False], [True]])
else:
# Identity-based equality:
self.assertAllEqual(a == b, False)
self.assertAllEqual(a != b, True)
def testOrderingOperators(self):
x = ragged_factory_ops.constant([[1, 5], [3]])
y = ragged_factory_ops.constant([[4, 5], [1]])
self.assertAllEqual((x > y), [[False, False], [True]])
self.assertAllEqual((x >= y), [[False, True], [True]])
self.assertAllEqual((x < y), [[True, False], [False]])
self.assertAllEqual((x <= y), [[True, True], [False]])
def testArithmeticOperators(self):
x = ragged_factory_ops.constant([[1.0, -2.0], [8.0]])
y = ragged_factory_ops.constant([[4.0, 4.0], [2.0]])
self.assertAllEqual(abs(x), [[1.0, 2.0], [8.0]])
# pylint: disable=invalid-unary-operand-type
self.assertAllEqual((-x), [[-1.0, 2.0], [-8.0]])
self.assertAllEqual((x + y), [[5.0, 2.0], [10.0]])
self.assertAllEqual((3.0 + y), [[7.0, 7.0], [5.0]])
self.assertAllEqual((x + 3.0), [[4.0, 1.0], [11.0]])
self.assertAllEqual((x - y), [[-3.0, -6.0], [6.0]])
self.assertAllEqual((3.0 - y), [[-1.0, -1.0], [1.0]])
self.assertAllEqual((x + 3.0), [[4.0, 1.0], [11.0]])
self.assertAllEqual((x * y), [[4.0, -8.0], [16.0]])
self.assertAllEqual((3.0 * y), [[12.0, 12.0], [6.0]])
self.assertAllEqual((x * 3.0), [[3.0, -6.0], [24.0]])
self.assertAllEqual((x / y), [[0.25, -0.5], [4.0]])
self.assertAllEqual((y / x), [[4.0, -2.0], [0.25]])
self.assertAllEqual((2.0 / y), [[0.5, 0.5], [1.0]])
self.assertAllEqual((x / 2.0), [[0.5, -1.0], [4.0]])
self.assertAllEqual((x // y), [[0.0, -1.0], [4.0]])
self.assertAllEqual((y // x), [[4.0, -2.0], [0.0]])
self.assertAllEqual((2.0 // y), [[0.0, 0.0], [1.0]])
self.assertAllEqual((x // 2.0), [[0.0, -1.0], [4.0]])
self.assertAllEqual((x % y), [[1.0, 2.0], [0.0]])
self.assertAllEqual((y % x), [[0.0, -0.0], [2.0]])
self.assertAllEqual((2.0 % y), [[2.0, 2.0], [0.0]])
self.assertAllEqual((x % 2.0), [[1.0, 0.0], [0.0]])
def testLogicalOperators(self):
# pylint: disable=invalid-unary-operand-type
a = ragged_factory_ops.constant([[True, True], [False]])
b = ragged_factory_ops.constant([[True, False], [False]])
self.assertAllEqual((~a), [[False, False], [True]])
self.assertAllEqual((a & b), [[True, False], [False]])
self.assertAllEqual((a & True), [[True, True], [False]])
self.assertAllEqual((True & b), [[True, False], [False]])
self.assertAllEqual((a | b), [[True, True], [False]])
self.assertAllEqual((a | False), [[True, True], [False]])
self.assertAllEqual((False | b), [[True, False], [False]])
self.assertAllEqual((a ^ b), [[False, True], [False]])
self.assertAllEqual((a ^ True), [[False, False], [True]])
self.assertAllEqual((True ^ b), [[False, True], [True]])
def testDummyOperators(self):
a = ragged_factory_ops.constant([[True, True], [False]])
with self.assertRaisesRegex(TypeError,
'RaggedTensor may not be used as a boolean.'):
bool(a)
with self.assertRaisesRegex(TypeError,
'RaggedTensor may not be used as a boolean.'):
if a:
pass
if __name__ == '__main__':
googletest.main()
| RaggedElementwiseOpsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/aiomysql.py | {
"start": 1977,
"end": 2232
} | class ____(AsyncAdapt_dbapi_cursor):
__slots__ = ()
def _make_new_cursor(
self, connection: AsyncIODBAPIConnection
) -> AsyncIODBAPICursor:
return connection.cursor(self._adapt_connection.dbapi.Cursor)
| AsyncAdapt_aiomysql_cursor |
python | numpy__numpy | numpy/_core/tests/test_item_selection.py | {
"start": 3769,
"end": 4885
} | class ____:
@pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,O"])
def test_simple(self, dtype):
if dtype.lower() == "m":
dtype += "8[ns]"
# putmask is weird and doesn't care about value length (even shorter)
vals = np.arange(1001).astype(dtype=dtype)
mask = np.random.randint(2, size=1000).astype(bool)
# Use vals.dtype in case of flexible dtype (i.e. string)
arr = np.zeros(1000, dtype=vals.dtype)
zeros = arr.copy()
np.putmask(arr, mask, vals)
assert_array_equal(arr[mask], vals[:len(mask)][mask])
assert_array_equal(arr[~mask], zeros[~mask])
@pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"])
@pytest.mark.parametrize("mode", ["raise", "wrap", "clip"])
def test_empty(self, dtype, mode):
arr = np.zeros(1000, dtype=dtype)
arr_copy = arr.copy()
mask = np.random.randint(2, size=1000).astype(bool)
# Allowing empty values like this is weird...
np.put(arr, mask, [])
assert_array_equal(arr, arr_copy)
| TestPutMask |
python | pytorch__pytorch | test/test_ops.py | {
"start": 101401,
"end": 101742
} | class ____(TestCase):
def test_self_kwargs(self):
"""Verify that we can call the aten ops with all kwargs even if the
argument's name is "self"
"""
torch.ops.aten.reshape.default(self=torch.rand(1, 2), shape=[2])
torch.ops.aten.min.default(self=torch.rand(100))
@unMarkDynamoStrictTest
| TestSelfKwarg |
python | scikit-learn__scikit-learn | sklearn/feature_selection/_sequential.py | {
"start": 828,
"end": 13977
} | class ____(SelectorMixin, MetaEstimatorMixin, BaseEstimator):
"""Transformer that performs Sequential Feature Selection.
This Sequential Feature Selector adds (forward selection) or
removes (backward selection) features to form a feature subset in a
greedy fashion. At each stage, this estimator chooses the best feature to
add or remove based on the cross-validation score of an estimator. In
the case of unsupervised learning, this Sequential Feature Selector
looks only at the features (X), not the desired outputs (y).
Read more in the :ref:`User Guide <sequential_feature_selection>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
An unfitted estimator.
n_features_to_select : "auto", int or float, default="auto"
If `"auto"`, the behaviour depends on the `tol` parameter:
- if `tol` is not `None`, then features are selected while the score
change does not exceed `tol`.
- otherwise, half of the features are selected.
If integer, the parameter is the absolute number of features to select.
If float between 0 and 1, it is the fraction of features to select.
.. versionadded:: 1.1
The option `"auto"` was added in version 1.1.
.. versionchanged:: 1.3
The default changed from `"warn"` to `"auto"` in 1.3.
tol : float, default=None
If the score is not incremented by at least `tol` between two
consecutive feature additions or removals, stop adding or removing.
`tol` can be negative when removing features using `direction="backward"`.
`tol` is required to be strictly positive when doing forward selection.
It can be useful to reduce the number of features at the cost of a small
decrease in the score.
`tol` is enabled only when `n_features_to_select` is `"auto"`.
.. versionadded:: 1.1
direction : {'forward', 'backward'}, default='forward'
Whether to perform forward selection or backward selection.
scoring : str or callable, default=None
Scoring method to use for cross-validation. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)`` that returns a single value.
See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
cases, :class:`~sklearn.model_selection.KFold` is used. These splitters
are instantiated with `shuffle=False` so the splits will be the same
across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : int, default=None
Number of jobs to run in parallel. When evaluating a new feature to
add or remove, the cross-validation procedure is parallel over the
folds.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_features_to_select_ : int
The number of features that were selected.
support_ : ndarray of shape (n_features,), dtype=bool
The mask of selected features.
See Also
--------
GenericUnivariateSelect : Univariate feature selector with configurable
strategy.
RFE : Recursive feature elimination based on importance weights.
RFECV : Recursive feature elimination based on importance weights, with
automatic selection of the number of features.
SelectFromModel : Feature selection based on thresholds of importance
weights.
Examples
--------
>>> from sklearn.feature_selection import SequentialFeatureSelector
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)
>>> sfs.fit(X, y)
SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),
n_features_to_select=3)
>>> sfs.get_support()
array([ True, False, True, True])
>>> sfs.transform(X).shape
(150, 3)
"""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit"])],
"n_features_to_select": [
StrOptions({"auto"}),
Interval(RealNotInt, 0, 1, closed="right"),
Interval(Integral, 0, None, closed="neither"),
],
"tol": [None, Interval(Real, None, None, closed="neither")],
"direction": [StrOptions({"forward", "backward"})],
"scoring": [None, StrOptions(set(get_scorer_names())), callable],
"cv": ["cv_object"],
"n_jobs": [None, Integral],
}
def __init__(
self,
estimator,
*,
n_features_to_select="auto",
tol=None,
direction="forward",
scoring=None,
cv=5,
n_jobs=None,
):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.tol = tol
self.direction = direction
self.scoring = scoring
self.cv = cv
self.n_jobs = n_jobs
@_fit_context(
# SequentialFeatureSelector.estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None, **params):
"""Learn the features to select from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
y : array-like of shape (n_samples,), default=None
Target values. This parameter may be ignored for
unsupervised learning.
**params : dict, default=None
Parameters to be passed to the underlying `estimator`, `cv`
and `scorer` objects.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
_raise_for_params(params, self, "fit")
tags = self.__sklearn_tags__()
X = validate_data(
self,
X,
accept_sparse="csc",
ensure_min_features=2,
ensure_all_finite=not tags.input_tags.allow_nan,
)
n_features = X.shape[1]
if self.n_features_to_select == "auto":
if self.tol is not None:
# With auto feature selection, `n_features_to_select_` will be updated
# to `support_.sum()` after features are selected.
self.n_features_to_select_ = n_features - 1
else:
self.n_features_to_select_ = n_features // 2
elif isinstance(self.n_features_to_select, Integral):
if self.n_features_to_select >= n_features:
raise ValueError("n_features_to_select must be < n_features.")
self.n_features_to_select_ = self.n_features_to_select
elif isinstance(self.n_features_to_select, Real):
self.n_features_to_select_ = int(n_features * self.n_features_to_select)
if self.tol is not None and self.tol < 0 and self.direction == "forward":
raise ValueError(
"tol must be strictly positive when doing forward selection"
)
cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
cloned_estimator = clone(self.estimator)
# the current mask corresponds to the set of features:
# - that we have already *selected* if we do forward selection
# - that we have already *excluded* if we do backward selection
current_mask = np.zeros(shape=n_features, dtype=bool)
n_iterations = (
self.n_features_to_select_
if self.n_features_to_select == "auto" or self.direction == "forward"
else n_features - self.n_features_to_select_
)
old_score = -np.inf
is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
# We only need to verify the routing here and not use the routed params
# because internally the actual routing will also take place inside the
# `cross_val_score` function.
if _routing_enabled():
process_routing(self, "fit", **params)
for _ in range(n_iterations):
new_feature_idx, new_score = self._get_best_new_feature_score(
cloned_estimator, X, y, cv, current_mask, **params
)
if is_auto_select and ((new_score - old_score) < self.tol):
break
old_score = new_score
current_mask[new_feature_idx] = True
if self.direction == "backward":
current_mask = ~current_mask
self.support_ = current_mask
self.n_features_to_select_ = self.support_.sum()
return self
def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask, **params):
# Return the best new feature and its score to add to the current_mask,
# i.e. return the best new feature and its score to add (resp. remove)
# when doing forward selection (resp. backward selection).
# Feature will be added if the current score and past score are greater
# than tol when n_feature is auto,
candidate_feature_indices = np.flatnonzero(~current_mask)
scores = {}
for feature_idx in candidate_feature_indices:
candidate_mask = current_mask.copy()
candidate_mask[feature_idx] = True
if self.direction == "backward":
candidate_mask = ~candidate_mask
X_new = X[:, candidate_mask]
scores[feature_idx] = cross_val_score(
estimator,
X_new,
y,
cv=cv,
scoring=self.scoring,
n_jobs=self.n_jobs,
params=params,
).mean()
new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx])
return new_feature_idx, scores[new_feature_idx]
def _get_support_mask(self):
check_is_fitted(self)
return self.support_
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = get_tags(self.estimator).input_tags.allow_nan
tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse
return tags
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self)
router.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
router.add(
splitter=check_cv(self.cv, classifier=is_classifier(self.estimator)),
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
router.add(
scorer=check_scoring(self.estimator, scoring=self.scoring),
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
return router
| SequentialFeatureSelector |
python | py-pdf__pypdf | pypdf/_doc_common.py | {
"start": 51883,
"end": 52374
} | class ____(Mapping[Any, Any]):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._raw_dict = dict(*args, **kwargs)
def __getitem__(self, key: str) -> Any:
func, arg = self._raw_dict.__getitem__(key)
return func(arg)
def __iter__(self) -> Iterator[Any]:
return iter(self._raw_dict)
def __len__(self) -> int:
return len(self._raw_dict)
def __str__(self) -> str:
return f"LazyDict(keys={list(self.keys())})"
| LazyDict |
python | python__mypy | mypyc/irbuild/for_helpers.py | {
"start": 24779,
"end": 27515
} | class ____(ForGenerator):
"""Generate IR for a for loop over a native generator."""
def need_cleanup(self) -> bool:
# Create a new cleanup block for when the loop is finished.
return True
def init(self, expr_reg: Value, target_type: RType) -> None:
# Define target to contains the generator expression. It's also the iterator.
# If we are inside a generator function, spill these into the environment class.
builder = self.builder
self.iter_target = builder.maybe_spill(expr_reg)
self.target_type = target_type
def gen_condition(self) -> None:
builder = self.builder
line = self.line
self.return_value = Register(object_rprimitive)
err = builder.add(LoadErrorValue(object_rprimitive, undefines=True))
builder.assign(self.return_value, err, line)
# Call generated generator helper method, passing a PyObject ** as the final
# argument that will be used to store the return value in the return value
# register. We ignore the return value but the presence of a return value
# indicates that the generator has finished. This is faster than raising
# and catching StopIteration, which is the non-native way of doing this.
ptr = builder.add(LoadAddress(object_pointer_rprimitive, self.return_value))
nn = builder.none_object()
helper_call = MethodCall(
builder.read(self.iter_target), GENERATOR_HELPER_NAME, [nn, nn, nn, nn, ptr], line
)
# We provide custom handling for error values.
helper_call.error_kind = ERR_NEVER
self.next_reg = builder.add(helper_call)
builder.add(Branch(self.next_reg, self.loop_exit, self.body_block, Branch.IS_ERROR))
def begin_body(self) -> None:
# Assign the value obtained from the generator helper method to the
# lvalue so that it can be referenced by code in the body of the loop.
builder = self.builder
line = self.line
# We unbox here so that iterating with tuple unpacking generates a tuple based
# unpack instead of an iterator based one.
next_reg = builder.coerce(self.next_reg, self.target_type, line)
builder.assign(builder.get_assignment_target(self.index), next_reg, line)
def gen_step(self) -> None:
# Nothing to do here, since we get the next item as part of gen_condition().
pass
def gen_cleanup(self) -> None:
# If return value is NULL (it wasn't assigned to by the generator helper method),
# an exception was raised that we need to propagate.
self.builder.primitive_op(propagate_if_error_op, [self.return_value], self.line)
| ForNativeGenerator |
python | nedbat__coveragepy | coverage/exceptions.py | {
"start": 1172,
"end": 1282
} | class ____(CoverageException):
"""A source file turned out not to be parsable Python."""
pass
| NotPython |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail.py | {
"start": 7262,
"end": 7588
} | class ____(BaseModel, alias_generator=lambda x: x + '_'):
# MYPY: error: Required dynamic aliases disallowed [pydantic-alias]
x: int
# MYPY: error: Required dynamic aliases disallowed [pydantic-alias]
KwargsAliasGeneratorModel(x=1)
KwargsAliasGeneratorModel(x_=1)
KwargsAliasGeneratorModel(z=1)
| KwargsAliasGeneratorModel |
python | gevent__gevent | src/gevent/tests/test__semaphore.py | {
"start": 11973,
"end": 12249
} | class ____(gevent.Greenlet):
# A greenlet whose switch method will have a low hashcode.
hashcode = 10
def __init__(self, *args, **kwargs):
gevent.Greenlet.__init__(self, *args, **kwargs)
self.switch = SwitchWithFixedHash(self, self.hashcode)
| FirstG |
python | huggingface__transformers | src/transformers/models/git/modeling_git.py | {
"start": 35601,
"end": 36236
} | class ____(nn.Module):
def __init__(self, config: GitConfig):
super().__init__()
self.config = config
self.visual_projection = nn.Sequential(
nn.Linear(config.vision_config.hidden_size, config.hidden_size),
nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
)
def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
return self.visual_projection(embeddings)
@auto_docstring(
custom_intro="""
The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states
"""
)
| GitProjection |
python | pytest-dev__pytest-django | pytest_django_test/app/models.py | {
"start": 60,
"end": 168
} | class ____(models.Model):
name: str = models.CharField(max_length=100)
# Routed to database "second".
| Item |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 5439,
"end": 7563
} | class ____(TypeTransformer):
"""
Convert arrays of dicts into JSON-formatted string arrays using double quotes only
when the schema defines a (nullable) array of strings. Output strings use no spaces
after commas and a single space after colons for consistent formatting.
Example:
[{'key': 'campaign', 'value': 'gg_nam_dg_search_brand'}]
→ ['{"key": "campaign","value": "gg_nam_dg_search_brand"}']
"""
def __init__(self, *args, **kwargs):
# apply this transformer during schema normalization phase(s)
config = TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization
super().__init__(config)
# register our custom transform
self.registerCustomTransform(self.get_transform_function())
@staticmethod
def get_transform_function():
def transform_function(original_value: Any, field_schema: Dict[str, Any]) -> Any:
# Skip null values (schema may include 'null')
if original_value is None:
return original_value
# Only apply if schema type includes 'array'
schema_type = field_schema.get("type")
if isinstance(schema_type, list):
if "array" not in schema_type:
return original_value
elif schema_type != "array":
return original_value
# Only apply if items type includes 'string'
items = field_schema.get("items", {}) or {}
items_type = items.get("type")
if isinstance(items_type, list):
if "string" not in items_type:
return original_value
elif items_type != "string":
return original_value
# Transform only lists where every element is a dict
if isinstance(original_value, list) and all(isinstance(el, dict) for el in original_value):
return [json.dumps(el, separators=(",", ": ")) for el in original_value]
return original_value
return transform_function
| DoubleQuotedDictTypeTransformer |
python | dask__distributed | distributed/core.py | {
"start": 55366,
"end": 58069
} | class ____(TypedDict):
status: Literal["OK"]
def error_message(e: BaseException, status: str = "error") -> ErrorMessage:
"""Produce message to send back given an exception has occurred
This does the following:
1. Gets the traceback
2. Truncates the exception and the traceback
3. Serializes the exception and traceback or
4. If they can't be serialized send string versions
5. Format a message and return
See Also
--------
clean_exception : deserialize and unpack message into exception/traceback
"""
MAX_ERROR_LEN = dask.config.get("distributed.admin.max-error-length")
tblib.pickling_support.install(e, *collect_causes(e))
tb = get_traceback()
tb_text = "".join(traceback.format_tb(tb))
e = truncate_exception(e, MAX_ERROR_LEN)
try:
e_bytes = protocol.pickle.dumps(e)
protocol.pickle.loads(e_bytes)
except Exception:
e_bytes = protocol.pickle.dumps(Exception(repr(e)))
e_serialized = protocol.to_serialize(e_bytes)
try:
tb_bytes = protocol.pickle.dumps(tb)
protocol.pickle.loads(tb_bytes)
except Exception:
tb_bytes = protocol.pickle.dumps(tb_text)
if len(tb_bytes) > MAX_ERROR_LEN:
tb_serialized = None
else:
tb_serialized = protocol.to_serialize(tb_bytes)
return {
"status": status,
"exception": e_serialized,
"traceback": tb_serialized,
"exception_text": repr(e),
"traceback_text": tb_text,
}
def clean_exception(
exception: BaseException | bytes | bytearray | str | None,
traceback: types.TracebackType | bytes | str | None = None,
**kwargs: Any,
) -> tuple[
type[BaseException | None], BaseException | None, types.TracebackType | None
]:
"""Reraise exception and traceback. Deserialize if necessary
See Also
--------
error_message : create and serialize errors into message
"""
if isinstance(exception, (bytes, bytearray)):
try:
exception = protocol.pickle.loads(exception)
except Exception:
exception = Exception(exception)
elif isinstance(exception, str):
exception = Exception(exception)
if isinstance(traceback, bytes):
try:
traceback = protocol.pickle.loads(traceback)
except (TypeError, AttributeError):
traceback = None
elif isinstance(traceback, str):
traceback = None # happens if the traceback failed serializing
assert isinstance(exception, BaseException) or exception is None
assert isinstance(traceback, types.TracebackType) or traceback is None
return type(exception), exception, traceback
| OKMessage |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum1.py | {
"start": 7171,
"end": 7342
} | class ____(Enum):
A = 1
__B = 2
reveal_type(TestEnum19.A, expected_text="Literal[TestEnum19.A]")
reveal_type(TestEnum19.__B, expected_text="Literal[2]")
| TestEnum19 |
python | sqlalchemy__sqlalchemy | test/sql/test_insert_exec.py | {
"start": 23902,
"end": 39202
} | class ____(fixtures.RemovesEvents, fixtures.TablesTest):
__sparse_driver_backend__ = True
__requires__ = ("insertmanyvalues",)
@classmethod
def define_tables(cls, metadata):
Table(
"data",
metadata,
Column("id", Integer, primary_key=True),
Column("x", String(50)),
Column("y", String(50)),
Column("z", Integer, server_default="5"),
)
Table(
"Unitéble2",
metadata,
Column("méil", Integer, primary_key=True),
Column("\u6e2c\u8a66", Integer),
)
Table(
"extra_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x_value", String(50)),
Column("y_value", String(50)),
)
Table(
"uniq_cons",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50), unique=True),
)
@testing.variation("use_returning", [True, False])
def test_returning_integrity_error(self, connection, use_returning):
"""test for #11532"""
stmt = self.tables.uniq_cons.insert()
if use_returning:
stmt = stmt.returning(self.tables.uniq_cons.c.id)
# pymssql thought it would be funny to use OperationalError for
# a unique key violation.
with expect_raises((exc.IntegrityError, exc.OperationalError)):
connection.execute(
stmt, [{"data": "the data"}, {"data": "the data"}]
)
def test_insert_unicode_keys(self, connection):
table = self.tables["Unitéble2"]
stmt = table.insert().returning(table.c["méil"])
connection.execute(
stmt,
[
{"méil": 1, "\u6e2c\u8a66": 1},
{"méil": 2, "\u6e2c\u8a66": 2},
{"méil": 3, "\u6e2c\u8a66": 3},
],
)
eq_(connection.execute(table.select()).all(), [(1, 1), (2, 2), (3, 3)])
@testing.variation("preserve_rowcount", [True, False])
def test_insert_returning_values(self, connection, preserve_rowcount):
t = self.tables.data
conn = connection
page_size = conn.dialect.insertmanyvalues_page_size or 100
data = [
{"x": "x%d" % i, "y": "y%d" % i}
for i in range(1, page_size * 2 + 27)
]
if preserve_rowcount:
eo = {"preserve_rowcount": True}
else:
eo = {}
result = conn.execute(
t.insert().returning(t.c.x, t.c.y), data, execution_options=eo
)
eq_([tup[0] for tup in result.cursor.description], ["x", "y"])
eq_(result.keys(), ["x", "y"])
assert t.c.x in result.keys()
assert t.c.id not in result.keys()
assert not result._soft_closed
assert isinstance(
result.cursor_strategy,
_cursor.FullyBufferedCursorFetchStrategy,
)
assert not result.closed
eq_(result.mappings().all(), data)
assert result._soft_closed
# assert result.closed
assert result.cursor is None
if preserve_rowcount:
eq_(result.rowcount, len(data))
def test_insert_returning_preexecute_pk(self, metadata, connection):
counter = itertools.count(1)
t = Table(
"t",
self.metadata,
Column(
"id",
Integer,
primary_key=True,
default=lambda: next(counter),
),
Column("data", Integer),
)
metadata.create_all(connection)
result = connection.execute(
t.insert().return_defaults(),
[{"data": 1}, {"data": 2}, {"data": 3}],
)
eq_(result.inserted_primary_key_rows, [(1,), (2,), (3,)])
@testing.requires.ctes_on_dml
@testing.variation("add_expr_returning", [True, False])
def test_insert_w_bindparam_in_nested_insert(
self, connection, add_expr_returning
):
"""test related to #9173"""
data, extra_table = self.tables("data", "extra_table")
inst = (
extra_table.insert()
.values(x_value="x", y_value="y")
.returning(extra_table.c.id)
.cte("inst")
)
stmt = (
data.insert()
.values(x="the x", z=select(inst.c.id).scalar_subquery())
.add_cte(inst)
)
if add_expr_returning:
stmt = stmt.returning(data.c.id, data.c.y + " returned y")
else:
stmt = stmt.returning(data.c.id)
result = connection.execute(
stmt,
[
{"y": "y1"},
{"y": "y2"},
{"y": "y3"},
],
)
result_rows = result.all()
ids = [row[0] for row in result_rows]
extra_row = connection.execute(
select(extra_table).order_by(extra_table.c.id)
).one()
extra_row_id = extra_row[0]
eq_(extra_row, (extra_row_id, "x", "y"))
eq_(
connection.execute(select(data).order_by(data.c.id)).all(),
[
(ids[0], "the x", "y1", extra_row_id),
(ids[1], "the x", "y2", extra_row_id),
(ids[2], "the x", "y3", extra_row_id),
],
)
@testing.requires.provisioned_upsert
def test_upsert_w_returning(self, connection):
"""test cases that will execise SQL similar to that of
test/orm/dml/test_bulk_statements.py
"""
data = self.tables.data
initial_data = [
{"x": "x1", "y": "y1", "z": 4},
{"x": "x2", "y": "y2", "z": 8},
]
ids = connection.scalars(
data.insert().returning(data.c.id), initial_data
).all()
upsert_data = [
{
"id": ids[0],
"x": "x1",
"y": "y1",
},
{
"id": 32,
"x": "x19",
"y": "y7",
},
{
"id": ids[1],
"x": "x5",
"y": "y6",
},
{
"id": 28,
"x": "x9",
"y": "y15",
},
]
stmt = provision.upsert(
config,
data,
(data,),
set_lambda=lambda inserted: {"x": inserted.x + " upserted"},
)
result = connection.execute(stmt, upsert_data)
eq_(
result.all(),
[
(ids[0], "x1 upserted", "y1", 4),
(32, "x19", "y7", 5),
(ids[1], "x5 upserted", "y2", 8),
(28, "x9", "y15", 5),
],
)
@testing.combinations(True, False, argnames="use_returning")
@testing.combinations(1, 2, argnames="num_embedded_params")
@testing.combinations(True, False, argnames="use_whereclause")
@testing.crashes(
"+mariadbconnector",
"returning crashes, regular executemany malfunctions",
)
def test_insert_w_bindparam_in_subq(
self, connection, use_returning, num_embedded_params, use_whereclause
):
"""test #8639
see also test_insert_w_bindparam_in_nested_insert
"""
t = self.tables.data
extra = self.tables.extra_table
conn = connection
connection.execute(
extra.insert(),
[
{"x_value": "p1", "y_value": "yv1"},
{"x_value": "p2", "y_value": "yv2"},
{"x_value": "p1_p1", "y_value": "yv3"},
{"x_value": "p2_p2", "y_value": "yv4"},
],
)
if num_embedded_params == 1:
if use_whereclause:
scalar_subq = select(bindparam("paramname")).scalar_subquery()
params = [
{"paramname": "p1_p1", "y": "y1"},
{"paramname": "p2_p2", "y": "y2"},
]
else:
scalar_subq = (
select(extra.c.x_value)
.where(extra.c.y_value == bindparam("y_value"))
.scalar_subquery()
)
params = [
{"y_value": "yv3", "y": "y1"},
{"y_value": "yv4", "y": "y2"},
]
elif num_embedded_params == 2:
if use_whereclause:
scalar_subq = (
select(
bindparam("paramname1", type_=String) + extra.c.x_value
)
.where(extra.c.y_value == bindparam("y_value"))
.scalar_subquery()
)
params = [
{"paramname1": "p1_", "y_value": "yv1", "y": "y1"},
{"paramname1": "p2_", "y_value": "yv2", "y": "y2"},
]
else:
scalar_subq = select(
bindparam("paramname1", type_=String)
+ bindparam("paramname2", type_=String)
).scalar_subquery()
params = [
{"paramname1": "p1_", "paramname2": "p1", "y": "y1"},
{"paramname1": "p2_", "paramname2": "p2", "y": "y2"},
]
else:
assert False
stmt = t.insert().values(x=scalar_subq)
if use_returning:
stmt = stmt.returning(t.c["x", "y"])
result = conn.execute(stmt, params)
if use_returning:
eq_(result.all(), [("p1_p1", "y1"), ("p2_p2", "y2")])
result = conn.execute(select(t.c["x", "y"]))
eq_(result.all(), [("p1_p1", "y1"), ("p2_p2", "y2")])
@testing.variation("preserve_rowcount", [True, False])
def test_insert_returning_defaults(self, connection, preserve_rowcount):
t = self.tables.data
if preserve_rowcount:
conn = connection.execution_options(preserve_rowcount=True)
else:
conn = connection
result = conn.execute(t.insert(), {"x": "x0", "y": "y0"})
first_pk = result.inserted_primary_key[0]
page_size = conn.dialect.insertmanyvalues_page_size or 100
total_rows = page_size * 5 + 27
data = [{"x": "x%d" % i, "y": "y%d" % i} for i in range(1, total_rows)]
result = conn.execute(t.insert().returning(t.c.id, t.c.z), data)
eq_(
result.all(),
[(pk, 5) for pk in range(1 + first_pk, total_rows + first_pk)],
)
if preserve_rowcount:
eq_(result.rowcount, total_rows - 1) # range starts from 1
def test_insert_return_pks_default_values(self, connection):
"""test sending multiple, empty rows into an INSERT and getting primary
key values back.
This has to use a format that indicates at least one DEFAULT in
multiple parameter sets, i.e. "INSERT INTO table (anycol) VALUES
(DEFAULT) (DEFAULT) (DEFAULT) ... RETURNING col"
if the database doesnt support this (like SQLite, mssql), it
actually runs the statement that many times on the cursor.
This is much less efficient, but is still more efficient than
how it worked previously where we'd run the statement that many
times anyway.
There's ways to make it work for those, such as on SQLite
we can use "INSERT INTO table (pk_col) VALUES (NULL) RETURNING pk_col",
but that assumes an autoincrement pk_col, not clear how this
could be produced generically.
"""
t = self.tables.data
conn = connection
result = conn.execute(t.insert(), {"x": "x0", "y": "y0"})
first_pk = result.inserted_primary_key[0]
page_size = conn.dialect.insertmanyvalues_page_size or 100
total_rows = page_size * 2 + 27
data = [{} for i in range(1, total_rows)]
result = conn.execute(t.insert().returning(t.c.id), data)
eq_(
result.all(),
[(pk,) for pk in range(1 + first_pk, total_rows + first_pk)],
)
@testing.combinations(None, 100, 329, argnames="batchsize")
@testing.combinations(
"engine",
"conn_execution_option",
"exec_execution_option",
"stmt_execution_option",
argnames="paramtype",
)
def test_page_size_adjustment(self, testing_engine, batchsize, paramtype):
t = self.tables.data
if paramtype == "engine" and batchsize is not None:
e = testing_engine(
options={
"insertmanyvalues_page_size": batchsize,
},
)
# sqlite, since this is a new engine, re-create the table
if not testing.requires.independent_connections.enabled:
t.create(e, checkfirst=True)
else:
e = testing.db
totalnum = 1275
data = [{"x": "x%d" % i, "y": "y%d" % i} for i in range(1, totalnum)]
insert_count = 0
with e.begin() as conn:
@event.listens_for(conn, "before_cursor_execute")
def go(conn, cursor, statement, parameters, context, executemany):
nonlocal insert_count
if statement.startswith("INSERT"):
insert_count += 1
stmt = t.insert()
if batchsize is None or paramtype == "engine":
conn.execute(stmt.returning(t.c.id), data)
elif paramtype == "conn_execution_option":
conn = conn.execution_options(
insertmanyvalues_page_size=batchsize
)
conn.execute(stmt.returning(t.c.id), data)
elif paramtype == "stmt_execution_option":
stmt = stmt.execution_options(
insertmanyvalues_page_size=batchsize
)
conn.execute(stmt.returning(t.c.id), data)
elif paramtype == "exec_execution_option":
conn.execute(
stmt.returning(t.c.id),
data,
execution_options=dict(
insertmanyvalues_page_size=batchsize
),
)
else:
assert False
assert_batchsize = batchsize or 1000
eq_(
insert_count,
totalnum // assert_batchsize
+ (1 if totalnum % assert_batchsize else 0),
)
def test_disabled(self, testing_engine):
e = testing_engine(
options={"use_insertmanyvalues": False, "sqlite_share_pool": True},
)
totalnum = 1275
data = [{"x": "x%d" % i, "y": "y%d" % i} for i in range(1, totalnum)]
t = self.tables.data
with e.begin() as conn:
stmt = t.insert()
with expect_raises_message(
exc.StatementError,
"with current server capabilities does not support "
"INSERT..RETURNING when executemany",
):
conn.execute(stmt.returning(t.c.id), data)
| InsertManyValuesTest |
python | openai__openai-python | src/openai/resources/beta/beta.py | {
"start": 977,
"end": 2236
} | class ____(SyncAPIResource):
@cached_property
def chat(self) -> Chat:
return Chat(self._client)
@cached_property
def realtime(self) -> Realtime:
return Realtime(self._client)
@cached_property
def chatkit(self) -> ChatKit:
return ChatKit(self._client)
@cached_property
def assistants(self) -> Assistants:
return Assistants(self._client)
@cached_property
def threads(self) -> Threads:
return Threads(self._client)
@cached_property
def with_raw_response(self) -> BetaWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return BetaWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> BetaWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return BetaWithStreamingResponse(self)
| Beta |
python | joke2k__faker | faker/providers/automotive/da_DK/__init__.py | {
"start": 48,
"end": 270
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``da_DK`` locale.
Source: https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Denmark
"""
license_formats = ("?? ## ###",)
| Provider |
python | walkccc__LeetCode | solutions/1711. Count Good Meals/1711.py | {
"start": 0,
"end": 334
} | class ____:
def countPairs(self, deliciousness: list[int]) -> int:
MOD = 10**9 + 7
MAX_BIT = 20 + 1
ans = 0
count = collections.Counter()
for d in deliciousness:
for i in range(MAX_BIT + 1):
power = 1 << i
ans += count[power - d]
ans %= MOD
count[d] += 1
return ans
| Solution |
python | doocs__leetcode | solution/2400-2499/2476.Closest Nodes Queries in a Binary Search Tree/Solution.py | {
"start": 192,
"end": 823
} | class ____:
def closestNodes(
self, root: Optional[TreeNode], queries: List[int]
) -> List[List[int]]:
def dfs(root: Optional[TreeNode]):
if root is None:
return
dfs(root.left)
nums.append(root.val)
dfs(root.right)
nums = []
dfs(root)
ans = []
for x in queries:
i = bisect_left(nums, x + 1) - 1
j = bisect_left(nums, x)
mi = nums[i] if 0 <= i < len(nums) else -1
mx = nums[j] if 0 <= j < len(nums) else -1
ans.append([mi, mx])
return ans
| Solution |
python | arrow-py__arrow | arrow/locales.py | {
"start": 12190,
"end": 13709
} | class ____(Locale):
names = ["es", "es-es"]
past = "hace {0}"
future = "en {0}"
and_word = "y"
timeframes = {
"now": "ahora",
"second": "un segundo",
"seconds": "{0} segundos",
"minute": "un minuto",
"minutes": "{0} minutos",
"hour": "una hora",
"hours": "{0} horas",
"day": "un día",
"days": "{0} días",
"week": "una semana",
"weeks": "{0} semanas",
"month": "un mes",
"months": "{0} meses",
"year": "un año",
"years": "{0} años",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"enero",
"febrero",
"marzo",
"abril",
"mayo",
"junio",
"julio",
"agosto",
"septiembre",
"octubre",
"noviembre",
"diciembre",
]
month_abbreviations = [
"",
"ene",
"feb",
"mar",
"abr",
"may",
"jun",
"jul",
"ago",
"sep",
"oct",
"nov",
"dic",
]
day_names = [
"",
"lunes",
"martes",
"miércoles",
"jueves",
"viernes",
"sábado",
"domingo",
]
day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n: int) -> str:
return f"{n}º"
| SpanishLocale |
python | scikit-learn__scikit-learn | sklearn/ensemble/_bagging.py | {
"start": 23682,
"end": 42617
} | class ____(ClassifierMixin, BaseBagging):
"""A Bagging classifier.
A Bagging classifier is an ensemble meta-estimator that fits base
classifiers each on random subsets of the original dataset and then
aggregate their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
.. versionadded:: 0.15
Parameters
----------
estimator : object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a
:class:`~sklearn.tree.DecisionTreeClassifier`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=10
The number of base estimators in the ensemble.
max_samples : int or float, default=1.0
The number of samples to draw from X to train each base estimator (with
replacement by default, see `bootstrap` for more details).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` unweighted samples
or `max_samples * sample_weight.sum()` weighted samples.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator (
without replacement by default, see `bootstrap_features` for more
details).
- If int, then draw `max_features` features.
- If float, then draw `max(1, int(max_features * n_features_in_))` features.
bootstrap : bool, default=True
Whether samples are drawn with replacement. If False, sampling without
replacement is performed. If fitting with `sample_weight`, it is
strongly recommended to choose True, as only drawing with replacement
will ensure the expected frequency semantics of `sample_weight`.
bootstrap_features : bool, default=False
Whether features are drawn with replacement.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate
the generalization error. Only available if bootstrap=True.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* constructor parameter.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the random resampling of the original dataset
(sample wise and feature wise).
If the base estimator accepts a `random_state` attribute, a different
seed is generated for each instance in the ensemble.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by an array of the indices selected.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
See Also
--------
BaggingRegressor : A Bagging regressor.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.ensemble import BaggingClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = BaggingClassifier(estimator=SVC(),
... n_estimators=10, random_state=0).fit(X, y)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
"""
def __init__(
self,
estimator=None,
n_estimators=10,
*,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=None,
random_state=None,
verbose=0,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
)
def _get_estimator(self):
"""Resolve which estimator to return (default is DecisionTreeClassifier)"""
if self.estimator is None:
return DecisionTreeClassifier()
return self.estimator
def _set_oob_score(self, X, y):
n_samples = y.shape[0]
n_classes_ = self.n_classes_
predictions = np.zeros((n_samples, n_classes_))
for estimator, samples, features in zip(
self.estimators_, self.estimators_samples_, self.estimators_features_
):
# Create mask for OOB samples
mask = ~indices_to_mask(samples, n_samples)
if hasattr(estimator, "predict_proba"):
predictions[mask, :] += estimator.predict_proba(
(X[mask, :])[:, features]
)
else:
p = estimator.predict((X[mask, :])[:, features])
j = 0
for i in range(n_samples):
if mask[i]:
predictions[i, p[j]] += 1
j += 1
if (predictions.sum(axis=1) == 0).any():
warn(
"Some inputs do not have OOB scores. "
"This probably means too few estimators were used "
"to compute any reliable oob estimates."
)
oob_decision_function = predictions / predictions.sum(axis=1)[:, np.newaxis]
oob_score = accuracy_score(y, np.argmax(predictions, axis=1))
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score
def _validate_y(self, y):
y = column_or_1d(y, warn=True)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def predict(self, X, **params):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_proba` (if available) or the `predict`
method (otherwise) of the sub-estimators via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
_raise_for_params(params, self, "predict")
predicted_probabilitiy = self.predict_proba(X, **params)
return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)), axis=0)
def predict_proba(self, X, **params):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
of an input sample represents the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_proba` (if available) or the `predict`
method (otherwise) of the sub-estimators via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
_raise_for_params(params, self, "predict_proba")
check_is_fitted(self)
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "predict_proba", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(predict_proba=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(
n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
self.n_classes_,
predict_params=routed_params.estimator.get("predict", None),
predict_proba_params=routed_params.estimator.get("predict_proba", None),
)
for i in range(n_jobs)
)
# Reduce
proba = sum(all_proba) / self.n_estimators
return proba
def predict_log_proba(self, X, **params):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the base
estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_log_proba`, the `predict_proba` or the
`proba` method of the sub-estimators via the metadata routing API. The
routing is tried in the mentioned order depending on whether this method is
available on the sub-estimator.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
_raise_for_params(params, self, "predict_log_proba")
check_is_fitted(self)
if hasattr(self.estimator_, "predict_log_proba"):
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "predict_log_proba", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(predict_log_proba=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_log_proba)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
self.n_classes_,
params=routed_params.estimator.predict_log_proba,
)
for i in range(n_jobs)
)
# Reduce
log_proba = all_log_proba[0]
for j in range(1, len(all_log_proba)):
log_proba = np.logaddexp(log_proba, all_log_proba[j])
log_proba -= np.log(self.n_estimators)
else:
log_proba = np.log(self.predict_proba(X, **params))
return log_proba
@available_if(
_estimator_has("decision_function", delegates=("estimators_", "estimator"))
)
def decision_function(self, X, **params):
"""Average of the decision functions of the base classifiers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `decision_function` method of the sub-estimators
via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : ndarray of shape (n_samples, k)
The decision function of the input samples. The columns correspond
to the classes in sorted order, as they appear in the attribute
``classes_``. Regression and binary classification are special
cases with ``k == 1``, otherwise ``k==n_classes``.
"""
_raise_for_params(params, self, "decision_function")
check_is_fitted(self)
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "decision_function", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(decision_function=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_decision_function)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
params=routed_params.estimator.decision_function,
)
for i in range(n_jobs)
)
# Reduce
decisions = sum(all_decisions) / self.n_estimators
return decisions
| BaggingClassifier |
python | langchain-ai__langchain | libs/partners/openai/langchain_openai/chat_models/azure.py | {
"start": 1091,
"end": 44809
} | class ____(BaseChatOpenAI):
r"""Azure OpenAI chat model integration.
Setup:
Head to the Azure [OpenAI quickstart guide](https://learn.microsoft.com/en-us/azure/ai-foundry/openai/chatgpt-quickstart?tabs=keyless%2Ctypescript-keyless%2Cpython-new%2Ccommand-line&pivots=programming-language-python)
to create your Azure OpenAI deployment.
Then install `langchain-openai` and set environment variables
`AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT`:
```bash
pip install -U langchain-openai
export AZURE_OPENAI_API_KEY="your-api-key"
export AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/"
```
Key init args — completion params:
azure_deployment:
Name of Azure OpenAI deployment to use.
temperature:
Sampling temperature.
max_tokens:
Max number of tokens to generate.
logprobs:
Whether to return logprobs.
Key init args — client params:
api_version:
Azure OpenAI REST API version to use (distinct from the version of the
underlying model). [See more on the different versions.](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning)
timeout:
Timeout for requests.
max_retries:
Max number of retries.
organization:
OpenAI organization ID. If not passed in will be read from env
var `OPENAI_ORG_ID`.
model:
The name of the underlying OpenAI model. Used for tracing and token
counting. Does not affect completion. E.g. `'gpt-4'`, `'gpt-35-turbo'`, etc.
model_version:
The version of the underlying OpenAI model. Used for tracing and token
counting. Does not affect completion. E.g., `'0125'`, `'0125-preview'`, etc.
See full list of supported init args and their descriptions in the params section.
Instantiate:
```python
from langchain_openai import AzureChatOpenAI
model = AzureChatOpenAI(
azure_deployment="your-deployment",
api_version="2024-05-01-preview",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# organization="...",
# model="gpt-35-turbo",
# model_version="0125",
# other params...
)
```
!!! note
Any param which is not explicitly supported will be passed directly to the
`openai.AzureOpenAI.chat.completions.create(...)` API every time to the model is
invoked.
For example:
```python
from langchain_openai import AzureChatOpenAI
import openai
AzureChatOpenAI(..., logprobs=True).invoke(...)
# results in underlying API call of:
openai.AzureOpenAI(..).chat.completions.create(..., logprobs=True)
# which is also equivalent to:
AzureChatOpenAI(...).invoke(..., logprobs=True)
```
Invoke:
```python
messages = [
(
"system",
"You are a helpful translator. Translate the user sentence to French.",
),
("human", "I love programming."),
]
model.invoke(messages)
```
```python
AIMessage(
content="J'adore programmer.",
usage_metadata={
"input_tokens": 28,
"output_tokens": 6,
"total_tokens": 34,
},
response_metadata={
"token_usage": {
"completion_tokens": 6,
"prompt_tokens": 28,
"total_tokens": 34,
},
"model_name": "gpt-4",
"system_fingerprint": "fp_7ec89fabc6",
"prompt_filter_results": [
{
"prompt_index": 0,
"content_filter_results": {
"hate": {"filtered": False, "severity": "safe"},
"self_harm": {"filtered": False, "severity": "safe"},
"sexual": {"filtered": False, "severity": "safe"},
"violence": {"filtered": False, "severity": "safe"},
},
}
],
"finish_reason": "stop",
"logprobs": None,
"content_filter_results": {
"hate": {"filtered": False, "severity": "safe"},
"self_harm": {"filtered": False, "severity": "safe"},
"sexual": {"filtered": False, "severity": "safe"},
"violence": {"filtered": False, "severity": "safe"},
},
},
id="run-6d7a5282-0de0-4f27-9cc0-82a9db9a3ce9-0",
)
```
Stream:
```python
for chunk in model.stream(messages):
print(chunk.text, end="")
```
```python
AIMessageChunk(content="", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content="J", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content="'", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content="ad", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content="ore", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content=" la", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(
content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
)
AIMessageChunk(content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content=".", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(
content="",
response_metadata={
"finish_reason": "stop",
"model_name": "gpt-4",
"system_fingerprint": "fp_811936bd4f",
},
id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f",
)
```
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
```
```python
AIMessageChunk(
content="J'adore la programmation.",
response_metadata={
"finish_reason": "stop",
"model_name": "gpt-4",
"system_fingerprint": "fp_811936bd4f",
},
id="run-ba60e41c-9258-44b8-8f3a-2f10599643b3",
)
```
Async:
```python
await model.ainvoke(messages)
# stream:
# async for chunk in (await model.astream(messages))
# batch:
# await model.abatch([messages])
```
Tool calling:
```python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
ai_msg.tool_calls
```
```python
[
{
"name": "GetWeather",
"args": {"location": "Los Angeles, CA"},
"id": "call_6XswGD5Pqk8Tt5atYr7tfenU",
},
{
"name": "GetWeather",
"args": {"location": "New York, NY"},
"id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi",
},
{
"name": "GetPopulation",
"args": {"location": "Los Angeles, CA"},
"id": "call_49CFW8zqC9W7mh7hbMLSIrXw",
},
{
"name": "GetPopulation",
"args": {"location": "New York, NY"},
"id": "call_6ghfKxV264jEfe1mRIkS3PE7",
},
]
```
Structured output:
```python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(
description="How funny the joke is, from 1 to 10"
)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
```python
Joke(
setup="Why was the cat sitting on the computer?",
punchline="To keep an eye on the mouse!",
rating=None,
)
```
See `AzureChatOpenAI.with_structured_output()` for more.
JSON mode:
```python
json_model = model.bind(response_format={"type": "json_object"})
ai_msg = json_model.invoke(
"Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
)
ai_msg.content
```
```python
'\\n{\\n "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\\n}'
```
Image input:
```python
import base64
import httpx
from langchain_core.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
]
)
ai_msg = model.invoke([message])
ai_msg.content
```
```python
"The weather in the image appears to be quite pleasant. The sky is mostly clear"
```
Token usage:
```python
ai_msg = model.invoke(messages)
ai_msg.usage_metadata
```
```python
{"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}
```
Logprobs:
```python
logprobs_model = model.bind(logprobs=True)
ai_msg = logprobs_model.invoke(messages)
ai_msg.response_metadata["logprobs"]
```
```python
{
"content": [
{
"token": "J",
"bytes": [74],
"logprob": -4.9617593e-06,
"top_logprobs": [],
},
{
"token": "'adore",
"bytes": [39, 97, 100, 111, 114, 101],
"logprob": -0.25202933,
"top_logprobs": [],
},
{
"token": " la",
"bytes": [32, 108, 97],
"logprob": -0.20141791,
"top_logprobs": [],
},
{
"token": " programmation",
"bytes": [
32,
112,
114,
111,
103,
114,
97,
109,
109,
97,
116,
105,
111,
110,
],
"logprob": -1.9361265e-07,
"top_logprobs": [],
},
{
"token": ".",
"bytes": [46],
"logprob": -1.2233183e-05,
"top_logprobs": [],
},
]
}
```
Response metadata
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
```python
{
"token_usage": {
"completion_tokens": 6,
"prompt_tokens": 28,
"total_tokens": 34,
},
"model_name": "gpt-35-turbo",
"system_fingerprint": None,
"prompt_filter_results": [
{
"prompt_index": 0,
"content_filter_results": {
"hate": {"filtered": False, "severity": "safe"},
"self_harm": {"filtered": False, "severity": "safe"},
"sexual": {"filtered": False, "severity": "safe"},
"violence": {"filtered": False, "severity": "safe"},
},
}
],
"finish_reason": "stop",
"logprobs": None,
"content_filter_results": {
"hate": {"filtered": False, "severity": "safe"},
"self_harm": {"filtered": False, "severity": "safe"},
"sexual": {"filtered": False, "severity": "safe"},
"violence": {"filtered": False, "severity": "safe"},
},
}
```
""" # noqa: E501
azure_endpoint: str | None = Field(
default_factory=from_env("AZURE_OPENAI_ENDPOINT", default=None)
)
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: str | None = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`
!!! note
This means you won't be able to use non-deployment endpoints.
"""
openai_api_version: str | None = Field(
alias="api_version",
default_factory=from_env("OPENAI_API_VERSION", default=None),
)
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
# Check OPENAI_API_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
openai_api_key: SecretStr | None = Field(
alias="api_key",
default_factory=secret_from_env(
["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None
),
)
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: SecretStr | None = Field(
default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None)
)
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more, see [this page](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id).
"""
azure_ad_token_provider: Callable[[], str] | None = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every sync request. For async requests,
will be invoked if `azure_ad_async_token_provider` is not provided.
"""
azure_ad_async_token_provider: Callable[[], Awaitable[str]] | None = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every async request.
"""
model_version: str = ""
"""The version of the model (e.g. `'0125'` for `'gpt-3.5-0125'`).
Azure OpenAI doesn't return model version with the response by default so it must
be manually specified if you want to use this information downstream, e.g. when
calculating costs.
When you specify the version, it will be appended to the model name in the
response. Setting correct version will help you to calculate the cost properly.
Model version is not validated, so make sure you set it correctly to get the
correct cost.
"""
openai_api_type: str | None = Field(
default_factory=from_env("OPENAI_API_TYPE", default="azure")
)
"""Legacy, for `openai<1.0.0` support."""
validate_base_url: bool = True
"""If legacy arg `openai_api_base` is passed in, try to infer if it is a
`base_url` or `azure_endpoint` and update client params accordingly.
"""
model_name: str | None = Field(default=None, alias="model") # type: ignore[assignment]
"""Name of the deployed OpenAI model, e.g. `'gpt-4o'`, `'gpt-35-turbo'`, etc.
Distinct from the Azure deployment name, which is set by the Azure user.
Used for tracing and token counting.
!!! warning
Does NOT affect completion.
"""
disabled_params: dict[str, Any] | None = Field(default=None)
"""Parameters of the OpenAI client or chat.completions endpoint that should be
disabled for the given model.
Should be specified as `{"param": None | ['val1', 'val2']}` where the key is the
parameter and the value is either None, meaning that parameter should never be
used, or it's a list of disabled values for the parameter.
For example, older models may not support the `'parallel_tool_calls'` parameter at
all, in which case `disabled_params={"parallel_tool_calls: None}` can ben passed
in.
If a parameter is disabled then it will not be used by default in any methods, e.g.
in
`langchain_openai.chat_models.azure.AzureChatOpenAI.with_structured_output`.
However this does not prevent a user from directly passed in the parameter during
invocation.
By default, unless `model_name="gpt-4o"` is specified, then
`'parallel_tools_calls'` will be disabled.
"""
max_tokens: int | None = Field(default=None, alias="max_completion_tokens") # type: ignore[assignment]
"""Maximum number of tokens to generate."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "chat_models", "azure_openai"]`
"""
return ["langchain", "chat_models", "azure_openai"]
@property
def lc_secrets(self) -> dict[str, str]:
"""Get the mapping of secret environment variables."""
return {
"openai_api_key": "AZURE_OPENAI_API_KEY",
"azure_ad_token": "AZURE_OPENAI_AD_TOKEN",
}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Check if the class is serializable in langchain."""
return True
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if self.n is not None and self.n < 1:
msg = "n must be at least 1."
raise ValueError(msg)
if self.n is not None and self.n > 1 and self.streaming:
msg = "n must be 1 when streaming."
raise ValueError(msg)
if self.disabled_params is None:
# As of 09-17-2024 'parallel_tool_calls' param is only supported for gpt-4o.
if self.model_name and self.model_name == "gpt-4o":
pass
else:
self.disabled_params = {"parallel_tool_calls": None}
# Check OPENAI_ORGANIZATION for backwards compatibility.
self.openai_organization = (
self.openai_organization
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
# Enable stream_usage by default if using default base URL and client
if all(
getattr(self, key, None) is None
for key in (
"stream_usage",
"openai_proxy",
"openai_api_base",
"base_url",
"client",
"root_client",
"async_client",
"root_async_client",
"http_client",
"http_async_client",
)
):
self.stream_usage = True
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = self.openai_api_base
if openai_api_base and self.validate_base_url:
if "/openai" not in openai_api_base:
msg = (
"As of openai>=1.0.0, Azure endpoints should be specified via "
"the `azure_endpoint` param not `openai_api_base` "
"(or alias `base_url`)."
)
raise ValueError(msg)
if self.deployment_name:
msg = (
"As of openai>=1.0.0, if `azure_deployment` (or alias "
"`deployment_name`) is specified then "
"`base_url` (or alias `openai_api_base`) should not be. "
"If specifying `azure_deployment`/`deployment_name` then use "
"`azure_endpoint` instead of `base_url`.\n\n"
"For example, you could specify:\n\n"
'azure_endpoint="https://xxx.openai.azure.com/", '
'azure_deployment="my-deployment"\n\n'
"Or you can equivalently specify:\n\n"
'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"'
)
raise ValueError(msg)
client_params: dict = {
"api_version": self.openai_api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment_name,
"api_key": (
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"azure_ad_token": (
self.azure_ad_token.get_secret_value() if self.azure_ad_token else None
),
"azure_ad_token_provider": self.azure_ad_token_provider,
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"default_headers": {
**(self.default_headers or {}),
"User-Agent": "langchain-partner-python-azure-openai",
},
"default_query": self.default_query,
}
if self.max_retries is not None:
client_params["max_retries"] = self.max_retries
if not self.client:
sync_specific = {"http_client": self.http_client}
self.root_client = openai.AzureOpenAI(**client_params, **sync_specific) # type: ignore[arg-type]
self.client = self.root_client.chat.completions
if not self.async_client:
async_specific = {"http_client": self.http_async_client}
if self.azure_ad_async_token_provider:
client_params["azure_ad_token_provider"] = (
self.azure_ad_async_token_provider
)
self.root_async_client = openai.AsyncAzureOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
)
self.async_client = self.root_async_client.chat.completions
return self
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None and self.deployment_name is not None:
self.profile = _get_default_model_profile(self.deployment_name)
return self
@property
def _identifying_params(self) -> dict[str, Any]:
"""Get the identifying parameters."""
return {
"azure_deployment": self.deployment_name,
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
@property
def lc_attributes(self) -> dict[str, Any]:
"""Get the attributes relevant to tracing."""
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
@property
def _default_params(self) -> dict[str, Any]:
"""Get the default parameters for calling Azure OpenAI API."""
params = super()._default_params
if "max_tokens" in params:
params["max_completion_tokens"] = params.pop("max_tokens")
return params
def _get_ls_params(
self, stop: list[str] | None = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "azure"
if self.model_name:
if self.model_version and self.model_version not in self.model_name:
params["ls_model_name"] = (
self.model_name + "-" + self.model_version.lstrip("-")
)
else:
params["ls_model_name"] = self.model_name
elif self.deployment_name:
params["ls_model_name"] = self.deployment_name
return params
def _create_chat_result(
self,
response: dict | openai.BaseModel,
generation_info: dict | None = None,
) -> ChatResult:
chat_result = super()._create_chat_result(response, generation_info)
if not isinstance(response, dict):
response = response.model_dump()
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
msg = (
"Azure has not provided the response due to a content filter "
"being triggered"
)
raise ValueError(msg)
if "model" in response:
model = response["model"]
if self.model_version:
model = f"{model}-{self.model_version}"
chat_result.llm_output = chat_result.llm_output or {}
chat_result.llm_output["model_name"] = model
if "prompt_filter_results" in response:
chat_result.llm_output = chat_result.llm_output or {}
chat_result.llm_output["prompt_filter_results"] = response[
"prompt_filter_results"
]
for chat_gen, response_choice in zip(
chat_result.generations, response["choices"], strict=False
):
chat_gen.generation_info = chat_gen.generation_info or {}
chat_gen.generation_info["content_filter_results"] = response_choice.get(
"content_filter_results", {}
)
return chat_result
def _get_request_payload(
self,
input_: LanguageModelInput,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> dict:
"""Get the request payload, using deployment name for Azure Responses API."""
payload = super()._get_request_payload(input_, stop=stop, **kwargs)
# For Azure Responses API, use deployment name instead of model name
if (
self._use_responses_api(payload)
and not payload.get("model")
and self.deployment_name
):
payload["model"] = self.deployment_name
return payload
def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
"""Route to Chat Completions or Responses API."""
if self._use_responses_api({**kwargs, **self.model_kwargs}):
return super()._stream_responses(*args, **kwargs)
return super()._stream(*args, **kwargs)
async def _astream(
self, *args: Any, **kwargs: Any
) -> AsyncIterator[ChatGenerationChunk]:
"""Route to Chat Completions or Responses API."""
if self._use_responses_api({**kwargs, **self.model_kwargs}):
async for chunk in super()._astream_responses(*args, **kwargs):
yield chunk
else:
async for chunk in super()._astream(*args, **kwargs):
yield chunk
def with_structured_output(
self,
schema: _DictOrPydanticClass | None = None,
*,
method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
include_raw: bool = False,
strict: bool | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, _DictOrPydantic]:
r"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- A JSON Schema,
- A `TypedDict` class,
- A Pydantic class,
- Or an OpenAI function/tool schema.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
method: The method for steering model generation, one of:
- `'json_schema'`:
Uses OpenAI's [Structured Output API](https://platform.openai.com/docs/guides/structured-outputs).
Supported for `'gpt-4o-mini'`, `'gpt-4o-2024-08-06'`, `'o1'`, and later
models.
- `'function_calling'`:
Uses OpenAI's tool-calling (formerly called function calling)
[API](https://platform.openai.com/docs/guides/function-calling)
- `'json_mode'`:
Uses OpenAI's [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode).
Note that if using JSON mode then you must include instructions for
formatting the output into the desired schema into the model call
Learn more about the differences between the methods and which models
support which methods [here](https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format).
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
strict:
- True:
Model output is guaranteed to exactly match the schema.
The input schema will also be validated according to the [supported schemas](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas?api-mode=responses#supported-schemas).
- False:
Input schema will not be validated and model output will not be
validated.
- None:
`strict` argument will not be passed to the model.
If schema is specified via TypedDict or JSON schema, `strict` is not
enabled by default. Pass `strict=True` to enable it.
!!! note
`strict` can only be non-null if `method` is `'json_schema'`
or `'function_calling'`.
kwargs: Additional keyword args are passed through to the model.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
!!! warning "Behavior changed in `langchain-openai` 0.3.0"
`method` default changed from "function_calling" to "json_schema".
!!! warning "Behavior changed in `langchain-openai` 0.3.12"
Support for `tools` added.
!!! warning "Behavior changed in `langchain-openai` 0.3.21"
Pass `kwargs` through to the model.
??? note "Example: `schema=Pydantic` class, `method='json_schema'`, `include_raw=False`, `strict=True`"
Note, OpenAI has a number of restrictions on what types of schemas can be
provided if `strict` = True. When using Pydantic, our model cannot
specify any Field metadata (like min/max constraints) and fields cannot
have default values.
See all constraints [here](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas).
```python
from typing import Optional
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str | None = Field(
default=..., description="A justification for the answer."
)
model = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
??? note "Example: `schema=Pydantic` class, `method='function_calling'`, `include_raw=False`, `strict=False`"
```python
from typing import Optional
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str | None = Field(
default=..., description="A justification for the answer."
)
model = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_model = model.with_structured_output(
AnswerWithJustification, method="function_calling"
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
??? note "Example: `schema=Pydantic` class, `method='json_schema'`, `include_raw=True`"
```python
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_model = model.with_structured_output(
AnswerWithJustification, include_raw=True
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
??? note "Example: `schema=TypedDict` class, `method='json_schema'`, `include_raw=False`, `strict=False`"
```python
from typing_extensions import Annotated, TypedDict
from langchain_openai import AzureChatOpenAI
class AnswerWithJustification(TypedDict):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: Annotated[
str | None, None, "A justification for the answer."
]
model = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
??? note "Example: `schema=OpenAI` function schema, `method='json_schema'`, `include_raw=False`"
```python
from langchain_openai import AzureChatOpenAI
oai_schema = {
'name': 'AnswerWithJustification',
'description': 'An answer to the user question along with justification for the answer.',
'parameters': {
'type': 'object',
'properties': {
'answer': {'type': 'string'},
'justification': {'description': 'A justification for the answer.', 'type': 'string'}
},
'required': ['answer']
}
model = AzureChatOpenAI(
azure_deployment="...",
model="gpt-4o",
temperature=0,
)
structured_model = model.with_structured_output(oai_schema)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
??? note "Example: `schema=Pydantic` class, `method='json_mode'`, `include_raw=True`"
```python
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
justification: str
model = AzureChatOpenAI(
azure_deployment="...",
model="gpt-4o",
temperature=0,
)
structured_model = model.with_structured_output(
AnswerWithJustification, method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'),
# 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
# 'parsing_error': None
# }
```
??? note "Example: `schema=None`, `method='json_mode'`, `include_raw=True`"
```python
structured_model = model.with_structured_output(
method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'),
# 'parsed': {
# 'answer': 'They are both the same weight.',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
# },
# 'parsing_error': None
# }
```
""" # noqa: E501
return super().with_structured_output(
schema, method=method, include_raw=include_raw, strict=strict, **kwargs
)
| AzureChatOpenAI |
python | tox-dev__tox | src/tox/execute/local_sub_process/__init__.py | {
"start": 1434,
"end": 1745
} | class ____(Execute):
def build_instance( # noqa: PLR6301
self,
request: ExecuteRequest,
options: ExecuteOptions,
out: SyncWrite,
err: SyncWrite,
) -> ExecuteInstance:
return LocalSubProcessExecuteInstance(request, options, out, err)
| LocalSubProcessExecutor |
python | getsentry__sentry-python | sentry_sdk/integrations/falcon.py | {
"start": 3679,
"end": 9501
} | class ____(Integration):
identifier = "falcon"
origin = f"auto.http.{identifier}"
transaction_style = ""
def __init__(self, transaction_style="uri_template"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
version = parse_version(FALCON_VERSION)
_check_minimum_version(FalconIntegration, version)
_patch_wsgi_app()
_patch_handle_exception()
_patch_prepare_middleware()
def _patch_wsgi_app():
# type: () -> None
original_wsgi_app = falcon_app_class.__call__
def sentry_patched_wsgi_app(self, env, start_response):
# type: (falcon.API, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(FalconIntegration)
if integration is None:
return original_wsgi_app(self, env, start_response)
sentry_wrapped = SentryWsgiMiddleware(
lambda envi, start_resp: original_wsgi_app(self, envi, start_resp),
span_origin=FalconIntegration.origin,
)
return sentry_wrapped(env, start_response)
falcon_app_class.__call__ = sentry_patched_wsgi_app
def _patch_handle_exception():
# type: () -> None
original_handle_exception = falcon_app_class._handle_exception
@ensure_integration_enabled(FalconIntegration, original_handle_exception)
def sentry_patched_handle_exception(self, *args):
# type: (falcon.API, *Any) -> Any
# NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
# method signature from `(ex, req, resp, params)` to
# `(req, resp, ex, params)`
ex = response = None
with capture_internal_exceptions():
ex = next(argument for argument in args if isinstance(argument, Exception))
response = next(
argument for argument in args if isinstance(argument, falcon.Response)
)
was_handled = original_handle_exception(self, *args)
if ex is None or response is None:
# Both ex and response should have a non-None value at this point; otherwise,
# there is an error with the SDK that will have been captured in the
# capture_internal_exceptions block above.
return was_handled
if _exception_leads_to_http_5xx(ex, response):
event, hint = event_from_exception(
ex,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "falcon", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
return was_handled
falcon_app_class._handle_exception = sentry_patched_handle_exception
def _patch_prepare_middleware():
# type: () -> None
original_prepare_middleware = falcon_helpers.prepare_middleware
def sentry_patched_prepare_middleware(
middleware=None, independent_middleware=False, asgi=False
):
# type: (Any, Any, bool) -> Any
if asgi:
# We don't support ASGI Falcon apps, so we don't patch anything here
return original_prepare_middleware(middleware, independent_middleware, asgi)
integration = sentry_sdk.get_client().get_integration(FalconIntegration)
if integration is not None:
middleware = [SentryFalconMiddleware()] + (middleware or [])
# We intentionally omit the asgi argument here, since the default is False anyways,
# and this way, we remain backwards-compatible with pre-3.0.0 Falcon versions.
return original_prepare_middleware(middleware, independent_middleware)
falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware
def _exception_leads_to_http_5xx(ex, response):
# type: (Exception, falcon.Response) -> bool
is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
"5"
)
is_unhandled_error = not isinstance(
ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
)
# We only check the HTTP status on Falcon 3 because in Falcon 2, the status on the response
# at the stage where we capture it is listed as 200, even though we would expect to see a 500
# status. Since at the time of this change, Falcon 2 is ca. 4 years old, we have decided to
# only perform this check on Falcon 3+, despite the risk that some handled errors might be
# reported to Sentry as unhandled on Falcon 2.
return (is_server_error or is_unhandled_error) and (
not FALCON3 or _has_http_5xx_status(response)
)
def _has_http_5xx_status(response):
# type: (falcon.Response) -> bool
return response.status.startswith("5")
def _set_transaction_name_and_source(event, transaction_style, request):
# type: (Event, str, falcon.Request) -> None
name_for_style = {
"uri_template": request.uri_template,
"path": request.path,
}
event["transaction"] = name_for_style[transaction_style]
event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
def _make_request_event_processor(req, integration):
    # type: (falcon.Request, FalconIntegration) -> EventProcessor
    """Build an event processor closed over *req* and *integration*.

    The returned callable names the transaction according to the
    integration's configured style and folds request data into the event,
    shielding Sentry-internal failures from the host application.
    """

    def falcon_processor(event, hint):
        # type: (Event, dict[str, Any]) -> Event
        _set_transaction_name_and_source(event, integration.transaction_style, req)

        with capture_internal_exceptions():
            FalconRequestExtractor(req).extract_into_event(event)

        return event

    return falcon_processor
| FalconIntegration |
python | PyCQA__pylint | tests/functional/r/regression/regression_4439.py | {
"start": 364,
"end": 479
class ____:
    # attrs-style fixture class (pylint functional regression test 4439).
    name: str = attrib()
    age: int = attrib()
    # NOTE(review): chained assignment — at runtime this performs the item
    # assignment `Optional[str] = attrib(...)` before binding `occupation`.
    # Presumably intentional here, since this file appears to exist to
    # exercise a pylint crash on exactly this construct; in real code one
    # would write `occupation: Optional[str] = attrib(default=None)`.
    occupation = Optional[str] = attrib(default=None)
| User |
python | pydantic__pydantic | pydantic/json_schema.py | {
"start": 3284,
"end": 5077
class ____(UserWarning):
    """This class is used to emit warnings produced during JSON schema generation.

    See the [`GenerateJsonSchema.emit_warning`][pydantic.json_schema.GenerateJsonSchema.emit_warning] and
    [`GenerateJsonSchema.render_warning_message`][pydantic.json_schema.GenerateJsonSchema.render_warning_message]
    methods for more details; these can be overridden to control warning behavior.
    """


# Sentinel: identity-compared (`is NoDefault`) rather than equality-compared.
NoDefault = object()
"""A sentinel value used to indicate that no default value should be used when generating a JSON Schema
for a core schema with a default value.
"""


# ##### JSON Schema Generation #####
DEFAULT_REF_TEMPLATE = '#/$defs/{model}'
"""The default format string used to generate reference names."""

# There are three types of references relevant to building JSON schemas:
# 1. core_schema "ref" values; these are not exposed as part of the JSON schema
#    * these might look like the fully qualified path of a model, its id, or something similar
CoreRef = NewType('CoreRef', str)
# 2. keys of the "definitions" object that will eventually go into the JSON schema
#    * by default, these look like "MyModel", though may change in the presence of collisions
#    * eventually, we may want to make it easier to modify the way these names are generated
CoreRef = CoreRef  # noqa: PLW0127 -- no-op; see NOTE below
DefsRef = NewType('DefsRef', str)
# 3. the values corresponding to the "$ref" key in the schema
#    * By default, these look like "#/$defs/MyModel", as in {"$ref": "#/$defs/MyModel"}
JsonRef = NewType('JsonRef', str)

# A (core ref, mode) pair; distinct 'validation'/'serialization' modes can
# yield distinct schemas for the same core ref.
CoreModeRef = tuple[CoreRef, JsonSchemaMode]
JsonSchemaKeyT = TypeVar('JsonSchemaKeyT', bound=Hashable)

# NOTE(review): presumably the JSON Schema "type" names treated as primitive
# (i.e. safe to inline without a $ref) -- TODO confirm against usage sites.
_PRIMITIVE_JSON_SCHEMA_TYPES = ('string', 'boolean', 'null', 'integer', 'number')
@dataclasses.dataclass(**_internal_dataclass.slots_true)
| PydanticJsonSchemaWarning |
python | spack__spack | lib/spack/spack/modules/common.py | {
"start": 37858,
"end": 38032
# Subclasses AttributeError as well, so callers may catch it either as a
# module-system error (ModulesError) or as a plain attribute failure.
class ____(AttributeError, ModulesError):
    """Raised if the attribute ``modulerc_header`` has not been specified
    in the derived classes.
    """
| ModulercHeaderNotDefined |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 48056,
"end": 49621
class ____(FieldValues):
    """
    Valid and invalid values for `DateTimeField`.

    Declarative test data consumed by the ``FieldValues`` harness:
    ``valid_inputs`` maps accepted inputs to the parsed (UTC-aware)
    datetimes, ``invalid_inputs`` maps rejected inputs to the expected
    error messages, and ``outputs`` maps values to their serialized form.
    """
    # Accepted inputs -> expected parsed value (all timezone-aware, UTC).
    valid_inputs = {
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc),
        '2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc),
        '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc),
        datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc),
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc),
    }
    # Rejected inputs -> expected validation error message list.
    invalid_inputs = {
        'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        '2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        '2018-08-16 22:00-24:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
        '9999-12-31T21:59:59.99990-03:00': ['Datetime value out of range.'],
    }
    # Internal value -> expected serialized representation; note None/'' both
    # serialize to None, and pre-formatted strings pass through unchanged.
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00Z',
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc): '2001-01-01T13:00:00Z',
        '2001-01-01T00:00:00': '2001-01-01T00:00:00',
        '2016-01-10T00:00:00': '2016-01-10T00:00:00',
        None: None,
        '': None,
    }
    # Field under test: naive inputs are interpreted as UTC.
    field = serializers.DateTimeField(default_timezone=utc)
| TestDateTimeField |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.