language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Pylons__pyramid | tests/test_url.py | {
"start": 46885,
"end": 47629
} | class ____(unittest.TestCase):
def _callFUT(self, request, *elements, **kw):
from pyramid.url import current_route_url
return current_route_url(request, *elements, **kw)
def _makeRequest(self):
class Request:
def current_route_url(self, *elements, **kw):
self.elements = elements
self.kw = kw
return 'current route url'
return Request()
def test_it(self):
request = self._makeRequest()
result = self._callFUT(request, 'abc', _app_url='')
self.assertEqual(result, 'current route url')
self.assertEqual(request.elements, ('abc',))
self.assertEqual(request.kw, {'_app_url': ''})
| Test_current_route_url |
python | google__jax | jax/_src/interpreters/ad.py | {
"start": 27444,
"end": 34957
} | class ____(Trace):
def __init__(self, parent_trace, tag):
super().__init__()
self.tag = tag
self.parent_trace = parent_trace
self.requires_low = False
def to_primal_tangent_pair(self, val):
if isinstance(val, JVPTracer) and val._trace.tag is self.tag:
return (val.primal, val.tangent)
else:
tangent_zero = Zero.from_primal_value(val)
return (val, tangent_zero)
def process_primitive(self, primitive, tracers, params):
primals_in, tangents_in = unzip2(map(self.to_primal_tangent_pair, tracers))
if (all(type(t) is Zero for t in tangents_in) and
primitive is not core.ref_p and
not any(isinstance(core.typeof(x), AbstractRef) for x in primals_in)):
return primitive.bind_with_trace(self.parent_trace, primals_in, params)
jvp = primitive_jvps.get(primitive)
if not jvp:
msg = f"Differentiation rule for '{primitive}' not implemented"
raise NotImplementedError(msg)
with core.set_current_trace(self.parent_trace):
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
if primitive.multiple_results:
return [maybe_jvp_tracer(self, x, t) for x, t in zip(primal_out, tangent_out)]
else:
return maybe_jvp_tracer(self, primal_out, tangent_out)
def cur_qdd(self, x):
p, _ = self.to_primal_tangent_pair(x)
with core.set_current_trace(self.parent_trace):
return core.cur_qdd(p)
def process_call(self, call_primitive, f, tracers, params):
assert call_primitive.multiple_results
primals, tangents = unzip2(map(self.to_primal_tangent_pair, tracers))
which_nz = [ type(t) is not Zero for t in tangents]
tangents = [t if type(t) is not Zero else None for t in tangents]
args, in_tree = tree_flatten((primals, tangents))
f_jvp = jvp_subtrace(f, self.tag)
f_jvp, which_nz_out = nonzero_tangent_outputs(f_jvp)
if isinstance(call_primitive, core.MapPrimitive):
in_axes = params['in_axes']
tangent_in_axes = [ax for ax, nz in zip(in_axes, which_nz) if nz]
out_axes_thunk = params['out_axes_thunk']
# NOTE: This assumes that the output tangents being zero is a
# deterministic function of which input tangents were zero.
@as_hashable_function(closure=out_axes_thunk)
def new_out_axes_thunk():
out_ax = out_axes_thunk()
return (*out_ax, *(ax for ax, nz in zip(out_ax, which_nz_out()) if nz))
params = dict(params, in_axes=(*in_axes, *tangent_in_axes),
out_axes_thunk=new_out_axes_thunk)
f_jvp, out_tree = traceable(f_jvp, in_tree)
update_params = call_param_updaters.get(call_primitive)
new_params = update_params(params, which_nz) if update_params else params
fun_and_args = (_update_annotation(f_jvp.with_unknown_names(), f.in_type, which_nz),) + tuple(args)
result = call_primitive.bind_with_trace(self.parent_trace, fun_and_args, new_params)
primal_out, tangent_out = tree_unflatten(out_tree(), result)
tangent_out = [Zero.from_primal_value(p) if t is None else t
for p, t in zip(primal_out, tangent_out)]
return [maybe_jvp_tracer(self, p, t) for p, t in zip(primal_out, tangent_out)]
# The only difference between process_map and process_call is that
# the `in_axes` and `out_axes_thunk` params must be updated;
# that's handled in process_call.
process_map = process_call
def process_custom_jvp_call(self, prim, fun, f_jvp, tracers, *, symbolic_zeros):
primals_in, tangents_in = unzip2(map(self.to_primal_tangent_pair, tracers))
if all(type(t) is Zero for t in tangents_in):
return prim.bind_with_trace(self.parent_trace, (fun, f_jvp, *primals_in),
dict(symbolic_zeros=symbolic_zeros))
with core.set_current_trace(self.parent_trace):
if not symbolic_zeros:
tangents_in = map(instantiate_zeros, tangents_in)
else:
tangents_in = map(replace_internal_symbolic_zeros, tangents_in)
outs = f_jvp.call_wrapped(*(tuple(primals_in) + tuple(tangents_in)))
primals_out, tangents_out = split_list(outs, [len(outs) // 2])
tangents_out = map(replace_rule_output_symbolic_zeros, tangents_out)
return map(partial(maybe_jvp_tracer, self), primals_out, tangents_out)
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees,
symbolic_zeros):
primals_in, tangents_in = unzip2(map(self.to_primal_tangent_pair, tracers))
if all(type(t) is Zero for t in tangents_in):
return prim.bind_with_trace(self.parent_trace,
(fun, fwd, bwd, *primals_in),
dict(out_trees=out_trees, symbolic_zeros=symbolic_zeros))
fwd_in = [(p, type(t) is not Zero) for p, t in zip(primals_in, tangents_in)]
fwd_in = [x for pair in fwd_in for x in pair] # flatten
with core.set_current_trace(self.parent_trace):
res_and_primals_out = fwd.call_wrapped(*fwd_in)
_, res_tree, input_fwds = out_trees()
num_res_out = res_tree.num_leaves - sum(f is not None for f in input_fwds)
res_out, primals_out = split_list(res_and_primals_out, [num_res_out])
res_out_ = iter(res_out)
res = [next(res_out_) if f is None else primals_in[f] for f in input_fwds]
assert next(res_out_, None) is None
avals_out = [core.get_aval(x).to_tangent_aval() for x in primals_out]
in_zeros = [type(t) is Zero for t in tangents_in]
nz_tangents_in = [t for z, t in zip(in_zeros, tangents_in) if not z]
with core.set_current_trace(self.parent_trace):
tangents_out = custom_lin_p.bind(
*res, *nz_tangents_in, num_res=res_tree.num_leaves, bwd=bwd,
out_avals=avals_out, symbolic_zeros=symbolic_zeros, in_zeros=in_zeros)
return map(partial(maybe_jvp_tracer, self), primals_out, tangents_out)
def process_custom_transpose(self, prim, call, tracers, **params):
ps_in, ts_in = unzip2(map(self.to_primal_tangent_pair, tracers))
res_ps_in, lin_ps_in = split_list(ps_in, [params['res_tree'].num_leaves])
res_ts_in, lin_ts_in = split_list(ts_in, [params['res_tree'].num_leaves])
# TODO(frostig): Handle differentiation with respect to residual
# operands. Calling `call` twice on all operands invalid, since it
# isn't linear in the residuals. However, we know that if we
# write:
#
# jvp_call_res = lambda x: partial(jvp, lambda r: call(r, x))
#
# then:
#
# jvp(call, (r, x), (dr, dx)) == jvp_call_res(x)(r, dr) + call(r, dx)
#
# In words: a possible strategy is to take the jvp of `call` with
# respect to residuals, and with linear arguments fixed, then add
# that to a custom-transpose call to `call` (i.e. what we already
# do below in the all-linear argument case).
if any(type(t) is not Zero for t in res_ts_in):
raise NotImplementedError(
'JVP of custom transpose with respect to non-symbolic-zero residuals')
with core.set_current_trace(self.parent_trace):
ps_out = prim.bind(call, *ps_in, **params)
lin_ts_in = map(instantiate_zeros, lin_ts_in)
ts_out = prim.bind(call, *res_ps_in, *lin_ts_in, **params)
return map(partial(maybe_jvp_tracer, self), ps_out, ts_out)
def maybe_jvp_tracer(trace, primal, tangent):
if (type(tangent) is Zero or
isinstance(core.typeof(tangent), core.ShapedArray)
and dtype(tangent) == float0):
return primal
else:
return JVPTracer(trace, primal, tangent)
| JVPTrace |
python | pytorch__pytorch | test/quantization/fx/test_numeric_suite_fx.py | {
"start": 32474,
"end": 39383
} | class ____(QuantizationTestCase):
def _test_extract_weights(
self, m, example_inputs, results_len=0, qconfig_dict=None, prepare_fn=prepare_fx
):
m = torch.fx.symbolic_trace(m)
if qconfig_dict is None:
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fn(copy.deepcopy(m), qconfig_dict, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# test both the public API as well as the internal GraphModule API
for extract_weights_fun in (extract_weights, _extract_weights_impl):
# test both m vs mp and mp vs mq
for m1, m2 in ((m, mp), (mp, mq)):
results = extract_weights_fun('a', m1, 'b', m2)
self.assertTrue(
len(results) == results_len,
f"expected len {results_len}, got len {len(results)}")
self.assert_ns_compare_dict_valid(results)
extend_logger_results_with_comparison(
results, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
results, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
results, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
def _test_match_activations(
self, m, data, prepared_expected_node_occurrence=None, results_len=0,
should_log_inputs=False,
qconfig_dict=None,
skip_scripting=False,
prepare_fn=prepare_fx,
):
if qconfig_dict is None:
qconfig_dict = torch.ao.quantization.get_default_qconfig_mapping()
if prepare_fn is prepare_fx:
m.eval()
else:
m.train()
mp = prepare_fn(copy.deepcopy(m), qconfig_dict, example_inputs=data)
mp(*data)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
m_ns, mp_ns2 = add_loggers(
'a', m, 'b', copy.deepcopy(mp), OutputLogger,
should_log_inputs=should_log_inputs)
mp_ns, mq_ns = add_loggers(
'a', mp, 'b', mq, OutputLogger,
should_log_inputs=should_log_inputs)
if prepared_expected_node_occurrence:
self.checkGraphModuleNodes(
m_ns, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_ns2, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_ns, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mq_ns, expected_node_occurrence=prepared_expected_node_occurrence)
if not skip_scripting:
m_ns = torch.jit.script(m_ns)
mp_ns = torch.jit.script(mp_ns)
mq_ns = torch.jit.script(mq_ns)
# calibrate
m_ns(*data)
mp_ns2(*data)
mp_ns(*data)
mq_ns(*data)
# check activation result correctness
results = []
for m1, m2 in ((m_ns, mp_ns2), (mp_ns, mq_ns)):
act_compare_dict = extract_logger_info(
m1, m2, OutputLogger, 'b')
self.assertTrue(
len(act_compare_dict) == results_len,
f"expected len {results_len}, got len {len(act_compare_dict)}")
self.assert_ns_compare_dict_valid(act_compare_dict)
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
results.append(act_compare_dict)
return results
def _test_match_shadow_activations(
self, m, data, prepared_expected_node_occurrence=None, results_len=None,
should_log_inputs=False, qconfig_dict=None, skip_scripting=False,
prepare_fn=prepare_fx, compare_fp32_vs_fp32_prepared=True,
):
if qconfig_dict is None:
qconfig_dict = torch.ao.quantization.get_default_qconfig_mapping()
if prepare_fn is prepare_fx:
m.eval()
else:
m.train()
print("qconfig_dict:", qconfig_dict)
mp = prepare_fn(copy.deepcopy(m), qconfig_dict, example_inputs=data)
print("prepared:", mp)
mp(*data)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
print("quantized:", mq)
if compare_fp32_vs_fp32_prepared:
m_shadows_mp = add_shadow_loggers(
'a', copy.deepcopy(m), 'b', copy.deepcopy(mp),
OutputLogger, should_log_inputs=should_log_inputs)
mp_shadows_mq = add_shadow_loggers(
'a', mp, 'b', mq, OutputLogger,
should_log_inputs=should_log_inputs)
if prepared_expected_node_occurrence:
if compare_fp32_vs_fp32_prepared:
self.checkGraphModuleNodes(
m_shadows_mp, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_shadows_mq, expected_node_occurrence=prepared_expected_node_occurrence)
if not skip_scripting:
if compare_fp32_vs_fp32_prepared:
m_shadows_mp = torch.jit.script(m_shadows_mp)
mp_shadows_mq = torch.jit.script(mp_shadows_mq)
# calibrate
if compare_fp32_vs_fp32_prepared:
m_shadows_mp(*data)
mp_shadows_mq(*data)
# check activation result correctness
results = []
models = (m_shadows_mp, mp_shadows_mq) if \
compare_fp32_vs_fp32_prepared else (mp_shadows_mq,)
for model in models:
act_compare_dict = extract_shadow_logger_info(
model, OutputLogger, 'b')
if results_len is not None:
self.assertTrue(
len(act_compare_dict) == results_len,
f"expected len {results_len}, got len {len(act_compare_dict)}")
self.assert_ns_compare_dict_valid(act_compare_dict)
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
results.append(act_compare_dict)
return results
| FXNumericSuiteQuantizationTestCase |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_metaclass.py | {
"start": 1572,
"end": 1683
} | class ____( # [invalid-metaclass]
Path, Proto, metaclass=MetaclassWithInconsistentMRO
):
pass
| SixthInvalid |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/manip_ops_test.py | {
"start": 1319,
"end": 8104
} | class ____(test_util.TensorFlowTestCase):
def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
with self.cached_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll, expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.cached_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
# Expected y's shape to be the same
ys = xs
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=np_input)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_input, shift, axis):
self._testRoll(np_input, shift, axis)
if np_input.dtype == np.float32:
self._testGradient(np_input, shift, axis)
@test_util.run_deprecated_v1
def testIntTypes(self):
for t in [np.int32, np.int64]:
self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],
[0, 1, 2])
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
@test_util.run_deprecated_v1
def testFloatTypes(self):
for t in [np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype]:
self._testAll(np.random.rand(5).astype(t), 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
@test_util.run_deprecated_v1
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(4, 4).astype(t)
self._testAll(x + 1j * x, 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
@test_util.run_deprecated_v1
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
# Make sure negative axis should be 0 <= axis + dims < dims
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
3, -10).eval()
@test_util.run_deprecated_v1
def testEmptyInput(self):
self._testAll(np.zeros([0, 1]), 1, 1)
self._testAll(np.zeros([1, 0]), 1, 1)
@test_util.run_v2_only
def testLargeInput(self):
with test_util.force_cpu():
# Num elements just over INT_MAX for int32 to ensure no overflow
np_input = np.arange(0, 128 * 524289 * 33, dtype=np.int8).reshape(
128, -1, 33
)
for shift in range(-5, 5):
roll = manip_ops.roll(np_input, shift, 0)
self.assertAllEqual(roll[shift], np_input[0], msg=f"shift={shift}")
self.assertAllEqual(roll[0], np_input[-shift], msg=f"shift={shift}")
@test_util.run_deprecated_v1
def testInvalidInputShape(self):
# The input should be 1-D or higher, checked in shape function.
with self.assertRaisesRegex(
ValueError, "Shape must be at least rank 1 but is rank 0"
):
manip_ops.roll(7, 1, 0)
@test_util.run_deprecated_v1
def testRollInputMustVectorHigherRaises(self):
# The input should be 1-D or higher, checked in kernel.
tensor = array_ops.placeholder(dtype=dtypes.int32)
shift = 1
axis = 0
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
@test_util.run_deprecated_v1
def testInvalidAxisShape(self):
# The axis should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegex(ValueError,
"Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])
@test_util.run_deprecated_v1
def testRollAxisMustBeScalarOrVectorRaises(self):
# The axis should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = 1
axis = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
@test_util.run_deprecated_v1
def testInvalidShiftShape(self):
# The shift should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegex(ValueError,
"Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)
@test_util.run_deprecated_v1
def testRollShiftMustBeScalarOrVectorRaises(self):
# The shift should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = 1
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
@test_util.run_deprecated_v1
def testInvalidShiftAndAxisNotEqualShape(self):
# The shift and axis must be same size, checked in shape function.
with self.assertRaisesRegex(ValueError, "both shapes must be equal"):
manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])
@test_util.run_deprecated_v1
def testRollShiftAndAxisMustBeSameSizeRaises(self):
# The shift and axis must be same size, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = [0, 1]
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
def testRollAxisOutOfRangeRaises(self):
tensor = [1, 2]
shift = 1
axis = 1
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
if __name__ == "__main__":
test_lib.main()
| RollTest |
python | plotly__plotly.py | plotly/io/_json.py | {
"start": 340,
"end": 18934
} | class ____(object):
_valid_engines = ("json", "orjson", "auto")
def __init__(self):
self._default_engine = "auto"
@property
def default_engine(self):
return self._default_engine
@default_engine.setter
def default_engine(self, val):
if val not in JsonConfig._valid_engines:
raise ValueError(
"Supported JSON engines include {valid}\n Received {val}".format(
valid=JsonConfig._valid_engines, val=val
)
)
if val == "orjson":
self.validate_orjson()
self._default_engine = val
@classmethod
def validate_orjson(cls):
orjson = get_module("orjson")
if orjson is None:
raise ValueError("The orjson engine requires the orjson package")
config = JsonConfig()
def coerce_to_strict(const):
"""
This is used to ultimately *encode* into strict JSON, see `encode`
"""
# before python 2.7, 'true', 'false', 'null', were include here.
if const in ("Infinity", "-Infinity", "NaN"):
return None
else:
return const
_swap_json = (
("<", "\\u003c"),
(">", "\\u003e"),
("/", "\\u002f"),
)
_swap_orjson = _swap_json + (
("\u2028", "\\u2028"),
("\u2029", "\\u2029"),
)
def _safe(json_str, _swap):
out = json_str
for unsafe_char, safe_char in _swap:
if unsafe_char in out:
out = out.replace(unsafe_char, safe_char)
return out
def to_json_plotly(plotly_object, pretty=False, engine=None):
"""
Convert a plotly/Dash object to a JSON string representation
Parameters
----------
plotly_object:
A plotly/Dash object represented as a dict, graph_object, or Dash component
pretty: bool (default False)
True if JSON representation should be pretty-printed, False if
representation should be as compact as possible.
engine: str (default None)
The JSON encoding engine to use. One of:
- "json" for an engine based on the built-in Python json module
- "orjson" for a faster engine that requires the orjson package
- "auto" for the "orjson" engine if available, otherwise "json"
If not specified, the default engine is set to the current value of
plotly.io.json.config.default_engine.
Returns
-------
str
Representation of input object as a JSON string
See Also
--------
to_json : Convert a plotly Figure to JSON with validation
"""
orjson = get_module("orjson", should_load=True)
# Determine json engine
if engine is None:
engine = config.default_engine
if engine == "auto":
if orjson is not None:
engine = "orjson"
else:
engine = "json"
elif engine not in ["orjson", "json"]:
raise ValueError("Invalid json engine: %s" % engine)
modules = {
"sage_all": get_module("sage.all", should_load=False),
"np": get_module("numpy", should_load=False),
"pd": get_module("pandas", should_load=False),
"image": get_module("PIL.Image", should_load=False),
}
# Dump to a JSON string and return
# --------------------------------
if engine == "json":
opts = {}
if pretty:
opts["indent"] = 2
else:
# Remove all whitespace
opts["separators"] = (",", ":")
from _plotly_utils.utils import PlotlyJSONEncoder
return _safe(
json.dumps(plotly_object, cls=PlotlyJSONEncoder, **opts), _swap_json
)
elif engine == "orjson":
JsonConfig.validate_orjson()
opts = orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY
if pretty:
opts |= orjson.OPT_INDENT_2
# Plotly
try:
plotly_object = plotly_object.to_plotly_json()
except AttributeError:
pass
# Try without cleaning
try:
return _safe(
orjson.dumps(plotly_object, option=opts).decode("utf8"), _swap_orjson
)
except TypeError:
pass
cleaned = clean_to_json_compatible(
plotly_object,
numpy_allowed=True,
datetime_allowed=True,
modules=modules,
)
return _safe(orjson.dumps(cleaned, option=opts).decode("utf8"), _swap_orjson)
def to_json(fig, validate=True, pretty=False, remove_uids=True, engine=None):
"""
Convert a figure to a JSON string representation
Parameters
----------
fig:
Figure object or dict representing a figure
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
pretty: bool (default False)
True if JSON representation should be pretty-printed, False if
representation should be as compact as possible.
remove_uids: bool (default True)
True if trace UIDs should be omitted from the JSON representation
engine: str (default None)
The JSON encoding engine to use. One of:
- "json" for an engine based on the built-in Python json module
- "orjson" for a faster engine that requires the orjson package
- "auto" for the "orjson" engine if available, otherwise "json"
If not specified, the default engine is set to the current value of
plotly.io.json.config.default_engine.
Returns
-------
str
Representation of figure as a JSON string
See Also
--------
to_json_plotly : Convert an arbitrary plotly graph_object or Dash component to JSON
"""
# Validate figure
# ---------------
fig_dict = validate_coerce_fig_to_dict(fig, validate)
# Remove trace uid
# ----------------
if remove_uids:
for trace in fig_dict.get("data", []):
trace.pop("uid", None)
return to_json_plotly(fig_dict, pretty=pretty, engine=engine)
def write_json(fig, file, validate=True, pretty=False, remove_uids=True, engine=None):
"""
Convert a figure to JSON and write it to a file or writeable
object.
Note: A figure converted to JSON with one version of Plotly.py may not be compatible with another version.
Parameters
----------
fig:
Figure object or dict representing a figure
file: str or writeable
A string representing a local file path or a writeable object
(e.g. a pathlib.Path object or an open file descriptor)
pretty: bool (default False)
True if JSON representation should be pretty-printed, False if
representation should be as compact as possible.
remove_uids: bool (default True)
True if trace UIDs should be omitted from the JSON representation
engine: str (default None)
The JSON encoding engine to use. One of:
- "json" for an engine based on the built-in Python json module
- "orjson" for a faster engine that requires the orjson package
- "auto" for the "orjson" engine if available, otherwise "json"
If not specified, the default engine is set to the current value of
plotly.io.json.config.default_engine.
Returns
-------
None
"""
# Get JSON string
# ---------------
# Pass through validate argument and let to_json handle validation logic
json_str = to_json(
fig, validate=validate, pretty=pretty, remove_uids=remove_uids, engine=engine
)
# Try to cast `file` as a pathlib object `path`.
# ----------------------------------------------
if isinstance(file, str):
# Use the standard Path constructor to make a pathlib object.
path = Path(file)
elif isinstance(file, Path):
# `file` is already a Path object.
path = file
else:
# We could not make a Path object out of file. Either `file` is an open file
# descriptor with a `write()` method or it's an invalid object.
path = None
# Open file
# ---------
if path is None:
# We previously failed to make sense of `file` as a pathlib object.
# Attempt to write to `file` as an open file descriptor.
try:
file.write(json_str)
return
except AttributeError:
pass
raise ValueError(
"""
The 'file' argument '{file}' is not a string, pathlib.Path object, or file descriptor.
""".format(file=file)
)
else:
# We previously succeeded in interpreting `file` as a pathlib object.
# Now we can use `write_bytes()`.
path.write_text(json_str)
def from_json_plotly(value, engine=None):
"""
Parse JSON string using the specified JSON engine
Parameters
----------
value: str or bytes
A JSON string or bytes object
engine: str (default None)
The JSON decoding engine to use. One of:
- if "json", parse JSON using built in json module
- if "orjson", parse using the faster orjson module, requires the orjson
package
- if "auto" use orjson module if available, otherwise use the json module
If not specified, the default engine is set to the current value of
plotly.io.json.config.default_engine.
Returns
-------
dict
See Also
--------
from_json_plotly : Parse JSON with plotly conventions into a dict
"""
orjson = get_module("orjson", should_load=True)
# Validate value
# --------------
if not isinstance(value, (str, bytes)):
raise ValueError(
"""
from_json_plotly requires a string or bytes argument but received value of type {typ}
Received value: {value}""".format(typ=type(value), value=value)
)
# Determine json engine
if engine is None:
engine = config.default_engine
if engine == "auto":
if orjson is not None:
engine = "orjson"
else:
engine = "json"
elif engine not in ["orjson", "json"]:
raise ValueError("Invalid json engine: %s" % engine)
if engine == "orjson":
JsonConfig.validate_orjson()
# orjson handles bytes input natively
value_dict = orjson.loads(value)
else:
# decode bytes to str for built-in json module
if isinstance(value, bytes):
value = value.decode("utf-8")
value_dict = json.loads(value)
return value_dict
def from_json(value, output_type="Figure", skip_invalid=False, engine=None):
"""
Construct a figure from a JSON string
Parameters
----------
value: str or bytes
String or bytes object containing the JSON representation of a figure
output_type: type or str (default 'Figure')
The output figure type or type name.
One of: graph_objs.Figure, 'Figure', graph_objs.FigureWidget, 'FigureWidget'
skip_invalid: bool (default False)
False if invalid figure properties should result in an exception.
True if invalid figure properties should be silently ignored.
engine: str (default None)
The JSON decoding engine to use. One of:
- if "json", parse JSON using built in json module
- if "orjson", parse using the faster orjson module, requires the orjson
package
- if "auto" use orjson module if available, otherwise use the json module
If not specified, the default engine is set to the current value of
plotly.io.json.config.default_engine.
Raises
------
ValueError
if value is not a string, or if skip_invalid=False and value contains
invalid figure properties
Returns
-------
Figure or FigureWidget
"""
# Decode JSON
# -----------
fig_dict = from_json_plotly(value, engine=engine)
# Validate coerce output type
# ---------------------------
cls = validate_coerce_output_type(output_type)
# Create and return figure
# ------------------------
fig = cls(fig_dict, skip_invalid=skip_invalid)
return fig
def read_json(file, output_type="Figure", skip_invalid=False, engine=None):
"""
Construct a figure from the JSON contents of a local file or readable
Python object.
Note: A figure converted to JSON with one version of Plotly.py may not be compatible with another version.
Parameters
----------
file: str or readable
A string containing the path to a local file or a read-able Python
object (e.g. a pathlib.Path object or an open file descriptor)
output_type: type or str (default 'Figure')
The output figure type or type name.
One of: graph_objs.Figure, 'Figure', graph_objs.FigureWidget, 'FigureWidget'
skip_invalid: bool (default False)
False if invalid figure properties should result in an exception.
True if invalid figure properties should be silently ignored.
engine: str (default None)
The JSON decoding engine to use. One of:
- if "json", parse JSON using built in json module
- if "orjson", parse using the faster orjson module, requires the orjson
package
- if "auto" use orjson module if available, otherwise use the json module
If not specified, the default engine is set to the current value of
plotly.io.json.config.default_engine.
Returns
-------
Figure or FigureWidget
"""
# Try to cast `file` as a pathlib object `path`.
if isinstance(file, str):
# Use the standard Path constructor to make a pathlib object.
path = Path(file)
elif isinstance(file, Path):
# `file` is already a Path object.
path = file
else:
# We could not make a Path object out of file. Either `file` is an open file
# descriptor with a `write()` method or it's an invalid object.
path = None
# Read file contents into JSON string
# -----------------------------------
if path is not None:
json_str = path.read_text()
else:
json_str = file.read()
# Construct and return figure
# ---------------------------
return from_json(
json_str, skip_invalid=skip_invalid, output_type=output_type, engine=engine
)
def clean_to_json_compatible(obj, **kwargs):
    """Recursively convert *obj* into a structure a JSON encoder can handle.

    Scalars (int, float, str) are returned unchanged; dicts and non-empty
    lists/tuples are processed recursively. Everything else is converted via
    a series of optional third-party hooks controlled through ``kwargs``:

    - ``numpy_allowed`` (bool, default False): if True, numeric numpy arrays
      (and numeric pandas Series/DatetimeIndex values) are returned as
      contiguous arrays instead of being converted to lists/strings.
    - ``datetime_allowed`` (bool, default False): if True, datetime values
      are returned as-is instead of ISO-format strings.
    - ``modules`` (dict): must contain the keys ``"sage_all"``, ``"np"``,
      ``"pd"`` and ``"image"``, each mapping to the imported module, or None
      when that library is unavailable.

    The order of the branches below is significant: numpy handling may
    rebind ``obj`` (object arrays become lists), pandas datetime handling
    runs before the generic datetime branch, and the plotly
    ``to_plotly_json`` hook runs last before the final container recursion.
    """
    # Try handling value as a scalar value that we have a conversion for.
    # Return immediately if we know we've hit a primitive value
    # Bail out fast for simple scalar types
    if isinstance(obj, (int, float, str)):
        return obj
    # Recurse into containers up front so the common dict/list case skips the
    # kwargs unpacking below. An empty list/tuple falls through and is
    # returned unchanged at the end of the function.
    if isinstance(obj, dict):
        return {k: clean_to_json_compatible(v, **kwargs) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        if obj:
            # Must process list recursively even though it may be slow
            return [clean_to_json_compatible(v, **kwargs) for v in obj]
    # unpack kwargs
    numpy_allowed = kwargs.get("numpy_allowed", False)
    datetime_allowed = kwargs.get("datetime_allowed", False)
    # NOTE(review): the {} default here can never actually be used -- the
    # required key lookups just below would raise KeyError -- so callers are
    # expected to always pass a fully-populated "modules" dict.
    modules = kwargs.get("modules", {})
    sage_all = modules["sage_all"]
    np = modules["np"]
    pd = modules["pd"]
    image = modules["image"]
    # Sage: coerce real/integer ring members to native float/int.
    if sage_all is not None:
        if obj in sage_all.RR:
            return float(obj)
        elif obj in sage_all.ZZ:
            return int(obj)
    # numpy
    if np is not None:
        if obj is np.ma.core.masked:
            # Masked scalar sentinel: represent as NaN.
            return float("nan")
        elif isinstance(obj, np.ndarray):
            if numpy_allowed and obj.dtype.kind in ("b", "i", "u", "f"):
                # Caller's engine can serialize numpy arrays directly; just
                # make sure the buffer is contiguous.
                return np.ascontiguousarray(obj)
            elif obj.dtype.kind == "M":
                # datetime64 array
                return np.datetime_as_string(obj).tolist()
            elif obj.dtype.kind == "U":
                # Unicode string array -> (nested) lists of str.
                return obj.tolist()
            elif obj.dtype.kind == "O":
                # Treat object array as a lists, continue processing
                obj = obj.tolist()
        elif isinstance(obj, np.datetime64):
            return str(obj)
    # pandas
    if pd is not None:
        if obj is pd.NaT or obj is pd.NA:
            # Pandas missing-value sentinels -> JSON null.
            return None
        elif isinstance(obj, (pd.Series, pd.DatetimeIndex)):
            if numpy_allowed and obj.dtype.kind in ("b", "i", "u", "f"):
                return np.ascontiguousarray(obj.values)
            elif obj.dtype.kind == "M":
                # Datetime-typed Series/Index: convert to Python datetimes.
                if isinstance(obj, pd.Series):
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore", FutureWarning)
                        # Series.dt.to_pydatetime will return Index[object]
                        # https://github.com/pandas-dev/pandas/pull/52459
                        dt_values = np.array(obj.dt.to_pydatetime()).tolist()
                else:  # DatetimeIndex
                    dt_values = obj.to_pydatetime().tolist()
                if not datetime_allowed:
                    # Note: We don't need to handle dropping timezones here because
                    # numpy's datetime64 doesn't support them and pandas's tz_localize
                    # above drops them.
                    for i in range(len(dt_values)):
                        dt_values[i] = dt_values[i].isoformat()
                return dt_values
    # datetime and date
    try:
        # Need to drop timezone for scalar datetimes. Don't need to convert
        # to string since engine can do that
        obj = obj.to_pydatetime()
    except (TypeError, AttributeError):
        pass
    if not datetime_allowed:
        # Engine cannot handle datetimes natively: serialize as ISO strings.
        try:
            return obj.isoformat()
        except (TypeError, AttributeError):
            pass
    elif isinstance(obj, datetime.datetime):
        return obj
    # Try .tolist() convertible, do not recurse inside
    try:
        return obj.tolist()
    except AttributeError:
        pass
    # Do best we can with decimal
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    # PIL images are converted through plotly's ImageUriValidator helper.
    if image is not None and isinstance(obj, image.Image):
        return ImageUriValidator.pil_image_to_uri(obj)
    # Plotly graph objects know how to serialize themselves.
    try:
        obj = obj.to_plotly_json()
    except AttributeError:
        pass
    # Recurse into lists and dictionaries
    if isinstance(obj, dict):
        return {k: clean_to_json_compatible(v, **kwargs) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        if obj:
            # Must process list recursively even though it may be slow
            return [clean_to_json_compatible(v, **kwargs) for v in obj]
    return obj
| JsonConfig |
python | getsentry__sentry | tests/sentry/workflow_engine/migrations/test_0089_update_cron_workflow_names.py | {
"start": 279,
"end": 9166
} | class ____(TestMigrations):
migrate_from = "0088_remove_monitor_slug_conditions"
migrate_to = "0089_update_cron_workflow_names"
app = "workflow_engine"
def setup_initial_state(self):
self.test_org = self.create_organization(
name="test-cron-migration-org", slug="test-cron-migration-org"
)
self.test_project = self.create_project(organization=self.test_org)
self.cron_detector1 = Detector.objects.create(
project=self.test_project,
type="monitor_check_in_failure",
name="Test Cron Detector 1",
config={},
)
self.cron_detector2 = Detector.objects.create(
project=self.test_project,
type="monitor_check_in_failure",
name="Test Cron Detector 2",
config={},
)
when_dcg1 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
self.workflow_three_actions = Workflow.objects.create(
organization=self.test_org,
name="Monitor Alert: my-monitor",
when_condition_group=when_dcg1,
enabled=True,
config={},
)
DetectorWorkflow.objects.create(
detector=self.cron_detector1, workflow=self.workflow_three_actions
)
if_dcg1 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
WorkflowDataConditionGroup.objects.create(
workflow=self.workflow_three_actions,
condition_group=if_dcg1,
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg1,
action=Action.objects.create(
type="slack",
config={
"target_display": "#alerts",
"target_identifier": "C1234567",
"target_type": 0,
},
),
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg1,
action=Action.objects.create(
type="email",
config={
"target_type": 4,
"target_display": None,
"target_identifier": None,
},
data={"fallthroughType": "ActiveMembers"},
),
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg1,
action=Action.objects.create(
type="sentry_app",
config={
"target_display": "My Custom App",
"target_identifier": "123",
"target_type": 3,
"sentry_app_identifier": "sentry_app_id",
},
),
)
when_dcg2 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
self.workflow_many_actions = Workflow.objects.create(
organization=self.test_org,
name="Monitor Alert: too-many",
when_condition_group=when_dcg2,
enabled=True,
config={},
)
DetectorWorkflow.objects.create(
detector=self.cron_detector2, workflow=self.workflow_many_actions
)
if_dcg2 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
WorkflowDataConditionGroup.objects.create(
workflow=self.workflow_many_actions,
condition_group=if_dcg2,
)
for i in range(5):
if i == 0:
action = Action.objects.create(
type="webhook",
config={},
)
else:
action = Action.objects.create(
type="slack",
config={
"target_display": f"#channel-{i}",
"target_identifier": f"C{1234567890 + i}",
"target_type": 0,
},
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg2,
action=action,
)
when_dcg3 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
self.workflow_no_actions = Workflow.objects.create(
organization=self.test_org,
name="Monitor Alert: no-actions",
when_condition_group=when_dcg3,
enabled=True,
config={},
)
DetectorWorkflow.objects.create(
detector=self.cron_detector1, workflow=self.workflow_no_actions
)
when_dcg4 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
self.workflow_no_prefix = Workflow.objects.create(
organization=self.test_org,
name="Custom Cron Alert",
when_condition_group=when_dcg4,
enabled=True,
config={},
)
DetectorWorkflow.objects.create(
detector=self.cron_detector1, workflow=self.workflow_no_prefix
)
when_dcg5 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
self.non_cron_workflow = Workflow.objects.create(
organization=self.test_org,
name="Monitor Alert: not-a-cron",
when_condition_group=when_dcg5,
enabled=True,
config={},
)
when_dcg6 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
self.workflow_other_actions = Workflow.objects.create(
organization=self.test_org,
name="Monitor Alert: other-actions",
when_condition_group=when_dcg6,
enabled=True,
config={},
)
DetectorWorkflow.objects.create(
detector=self.cron_detector1, workflow=self.workflow_other_actions
)
if_dcg6 = DataConditionGroup.objects.create(
organization=self.test_org,
logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
WorkflowDataConditionGroup.objects.create(
workflow=self.workflow_other_actions,
condition_group=if_dcg6,
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg6,
action=Action.objects.create(
type="pagerduty",
config={
"target_identifier": "PDSERVICE1",
"target_display": "Critical Service",
"target_type": 0,
},
),
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg6,
action=Action.objects.create(
type="vsts",
config={
"target_identifier": None,
"target_display": None,
"target_type": 0,
},
),
)
DataConditionGroupAction.objects.create(
condition_group=if_dcg6,
action=Action.objects.create(
type="discord",
config={
"target_display": "#alerts",
"target_identifier": "123456789",
"target_type": 0,
},
),
)
def test_all_workflow_name_scenarios(self):
workflow = Workflow.objects.get(id=self.workflow_three_actions.id)
assert workflow.name == "Notify: Slack #alerts, Email Issue Owners, Notify My Custom App"
workflow = Workflow.objects.get(id=self.workflow_many_actions.id)
assert workflow.name == "Notify: Webhook, Slack #channel-1, Slack #channel-2...(+2)"
workflow = Workflow.objects.get(id=self.workflow_no_actions.id)
assert workflow.name == "Monitor Alert: no-actions"
workflow = Workflow.objects.get(id=self.workflow_no_prefix.id)
assert workflow.name == "Custom Cron Alert"
workflow = Workflow.objects.get(id=self.non_cron_workflow.id)
assert workflow.name == "Monitor Alert: not-a-cron"
workflow = Workflow.objects.get(id=self.workflow_other_actions.id)
assert workflow.name == "Notify: PagerDuty, Azure DevOps, Discord #alerts"
| TestUpdateCronWorkflowNames |
python | sqlalchemy__sqlalchemy | examples/generic_associations/table_per_related.py | {
"start": 2487,
"end": 3441
} | class ____(HasAddresses, Base):
company_name: Mapped[str]
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
session = Session(engine)
session.add_all(
[
Customer(
name="customer 1",
addresses=[
Customer.Address(
street="123 anywhere street", city="New York", zip="10110"
),
Customer.Address(
street="40 main street", city="San Francisco", zip="95732"
),
],
),
Supplier(
company_name="Ace Hammers",
addresses=[
Supplier.Address(
street="2569 west elm", city="Detroit", zip="56785"
)
],
),
]
)
session.commit()
for customer in session.query(Customer):
for address in customer.addresses:
print(address)
print(address.parent)
| Supplier |
python | pytorch__pytorch | test/inductor/test_mem_estimation.py | {
"start": 2613,
"end": 6228
} | class ____(InductorTestCase):
def test_simple_linear_layers(self):
"""Test with a simple sequential model with explicit weights on CUDA."""
def create_inputs_and_weights():
"""Create inputs and weights on CUDA."""
x = torch.randn(32, 1000, device=GPU_TYPE)
w1 = torch.randn(500, 1000, device=GPU_TYPE)
w2 = torch.randn(100, 500, device=GPU_TYPE)
w3 = torch.randn(10, 100, device=GPU_TYPE)
return x, w1, w2, w3
def fn(x, w1, w2, w3):
h1 = torch.nn.functional.linear(x, w1)
h1 = torch.nn.functional.relu(h1)
h2 = torch.nn.functional.linear(h1, w2)
h2 = torch.nn.functional.relu(h2)
out = torch.nn.functional.linear(h2, w3)
return out
with FakeTensorMode():
# Trace with make_fx
x, w1, w2, w3 = create_inputs_and_weights()
fx_graph = make_fx(fn)(x, w1, w2, w3)
# Static analysis
def is_releasable(node):
return node.op not in ("placeholder", "get_attr")
fx_memory_profile = build_memory_profile(fx_graph.graph, is_releasable)
fx_peak = max(fx_memory_profile)
# Runtime profiling
profiler = FakeTensorMemoryProfilerMode()
with profiler:
x_runtime, w1_runtime, w2_runtime, w3_runtime = (
create_inputs_and_weights()
)
result = fn(x_runtime, w1_runtime, w2_runtime, w3_runtime)
del result
runtime_peak = profiler.max_memory
self.assertEqual(fx_peak, runtime_peak)
def test_conv_network(self):
"""Test with a convolutional network."""
def create_inputs_and_weights():
"""Create inputs and weights on CUDA."""
x = torch.randn(8, 3, 224, 224, device=GPU_TYPE)
conv1_weight = torch.randn(64, 3, 3, 3, device=GPU_TYPE)
conv2_weight = torch.randn(128, 64, 3, 3, device=GPU_TYPE)
linear_weight = torch.randn(10, 128 * 56 * 56, device=GPU_TYPE)
return x, conv1_weight, conv2_weight, linear_weight
def fn(x, conv1_weight, conv2_weight, linear_weight):
h = torch.nn.functional.conv2d(x, conv1_weight, padding=1)
h = torch.nn.functional.relu(h)
h = torch.nn.functional.max_pool2d(h, 2)
h = torch.nn.functional.conv2d(h, conv2_weight, padding=1)
h = torch.nn.functional.relu(h)
h = torch.nn.functional.max_pool2d(h, 2)
h = torch.flatten(h, 1)
out = torch.nn.functional.linear(h, linear_weight)
return out
with FakeTensorMode():
# Trace with make_fx
x, conv1_weight, conv2_weight, linear_weight = create_inputs_and_weights()
fx_graph = make_fx(fn)(x, conv1_weight, conv2_weight, linear_weight)
def is_releasable(node):
return node.op not in ("placeholder", "get_attr")
fx_memory_profile = build_memory_profile(fx_graph.graph, is_releasable)
fx_peak = max(fx_memory_profile)
# Runtime profiling
profiler = FakeTensorMemoryProfilerMode()
with profiler:
x_runtime, conv1_w, conv2_w, linear_w = create_inputs_and_weights()
result = fn(x_runtime, conv1_w, conv2_w, linear_w)
del result
runtime_peak = profiler.max_memory
self.assertEqual(fx_peak, runtime_peak)
| TestMemoryProfilingResNet |
python | mlflow__mlflow | tests/sagemaker/test_batch_deployment.py | {
"start": 942,
"end": 19533
} | class ____(NamedTuple):
model_path: str
run_id: str
model_uri: str
@pytest.fixture
def pretrained_model():
model_path = "model"
with mlflow.start_run():
X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
y = np.array([0, 0, 1, 1, 1, 0])
lr = LogisticRegression(solver="lbfgs")
lr.fit(X, y)
mlflow.sklearn.log_model(lr, name=model_path)
run_id = mlflow.active_run().info.run_id
model_uri = "runs:/" + run_id + "/" + model_path
return TrainedModel(model_path, run_id, model_uri)
@pytest.fixture
def sagemaker_client():
return boto3.client("sagemaker", region_name="us-west-2")
def get_sagemaker_backend(region_name):
return mock_sagemaker.backends[DEFAULT_ACCOUNT_ID][region_name]
def mock_sagemaker_aws_services(fn):
from moto import mock_ecr, mock_iam, mock_s3, mock_sts
@mock_ecr
@mock_iam
@mock_s3
@mock_sagemaker
@mock_sts
@wraps(fn)
def mock_wrapper(*args, **kwargs):
# Create an ECR repository for the `mlflow-pyfunc` SageMaker docker image
ecr_client = boto3.client("ecr", region_name="us-west-2")
ecr_client.create_repository(repositoryName=mfs.DEFAULT_IMAGE_NAME)
# Create the moto IAM role
role_policy = """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "*",
"Resource": "*"
}
]
}
"""
iam_client = boto3.client("iam", region_name="us-west-2")
iam_client.create_role(RoleName="moto", AssumeRolePolicyDocument=role_policy)
return fn(*args, **kwargs)
return mock_wrapper
def test_batch_deployment_with_unsupported_flavor_raises_exception(pretrained_model):
unsupported_flavor = "this is not a valid flavor"
match = "The specified flavor: `this is not a valid flavor` is not supported for deployment"
with pytest.raises(MlflowException, match=match) as exc:
mfs.deploy_transform_job(
job_name="bad_flavor",
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
flavor=unsupported_flavor,
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_batch_deployment_of_model_with_no_supported_flavors_raises_exception(pretrained_model):
logged_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
model_config_path = os.path.join(logged_model_path, "MLmodel")
model_config = Model.load(model_config_path)
del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
model_config.save(path=model_config_path)
match = "The specified model does not contain any of the supported flavors for deployment"
with pytest.raises(MlflowException, match=match) as exc:
mfs.deploy_transform_job(
job_name="missing-flavor",
model_uri=logged_model_path,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
flavor=None,
)
assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_deploy_sagemaker_transform_job_in_asynchronous_mode_without_archiving_throws_exception(
pretrained_model,
):
with pytest.raises(MlflowException, match="Resources must be archived") as exc:
mfs.deploy_transform_job(
job_name="test-job",
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
archive=False,
synchronous=False,
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_transform_job_and_s3_resources_with_expected_names_from_local(
pretrained_model, sagemaker_client
):
job_name = "test-job"
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
archive=True,
)
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
model_name = transform_job_description["ModelName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any(model_name in object_name for object_name in object_names)
assert job_name in [
transform_job["TransformJobName"]
for transform_job in sagemaker_client.list_transform_jobs()["TransformJobSummaries"]
]
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_transform_job_and_s3_resources_with_expected_names_from_local(
pretrained_model, sagemaker_client
):
job_name = "test-job"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
[
"deploy-transform-job",
"--job-name",
job_name,
"--model-uri",
pretrained_model.model_uri,
"--input-data-type",
"Some Data Type",
"--input-uri",
"Some Input Uri",
"--content-type",
"Some Content Type",
"--output-path",
"Some Output Path",
"--archive",
],
)
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
model_name = transform_job_description["ModelName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any(model_name in object_name for object_name in object_names)
assert job_name in [
transform_job["TransformJobName"]
for transform_job in sagemaker_client.list_transform_jobs()["TransformJobSummaries"]
]
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_transform_job_and_s3_resources_with_expected_names_from_s3(
pretrained_model, sagemaker_client
):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository(f"s3://{default_bucket}")
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = f"s3://{default_bucket}/{pretrained_model.model_path}"
job_name = "test-job"
mfs.deploy_transform_job(
job_name=job_name,
model_uri=model_s3_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
archive=True,
)
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
model_name = transform_job_description["ModelName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
s3_client = boto3.client("s3", region_name=region_name)
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any(model_name in object_name for object_name in object_names)
assert job_name in [
transform_job["TransformJobName"]
for transform_job in sagemaker_client.list_transform_jobs()["TransformJobSummaries"]
]
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_transform_job_and_s3_resources_with_expected_names_from_s3(
pretrained_model, sagemaker_client
):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository(f"s3://{default_bucket}")
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = f"s3://{default_bucket}/{pretrained_model.model_path}"
job_name = "test-job"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
[
"deploy-transform-job",
"--job-name",
job_name,
"--model-uri",
model_s3_uri,
"--input-data-type",
"Some Data Type",
"--input-uri",
"Some Input Uri",
"--content-type",
"Some Content Type",
"--output-path",
"Some Output Path",
"--archive",
],
)
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
model_name = transform_job_description["ModelName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any(model_name in object_name for object_name in object_names)
assert job_name in [
transform_job["TransformJobName"]
for transform_job in sagemaker_client.list_transform_jobs()["TransformJobSummaries"]
]
@mock_sagemaker_aws_services
def test_deploying_sagemaker_transform_job_with_preexisting_name_in_create_mode_throws_exception(
pretrained_model,
):
job_name = "test-job"
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
)
with pytest.raises(
MlflowException, match="a batch transform job with the same name already exists"
) as exc:
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@mock_sagemaker_aws_services
def test_deploy_in_synchronous_mode_waits_for_transform_job_creation_to_complete_before_returning(
pretrained_model, sagemaker_client
):
transform_job_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_transform_job_update_latency(
transform_job_creation_latency
)
job_name = "test-job"
deployment_start_time = time.time()
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
synchronous=True,
)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) >= transform_job_creation_latency
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
assert transform_job_description["TransformJobStatus"] == TransformJob.STATUS_COMPLETED
@mock_sagemaker_aws_services
def test_deploy_create_in_asynchronous_mode_returns_before_transform_job_creation_completes(
pretrained_model, sagemaker_client
):
transform_job_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_transform_job_update_latency(
transform_job_creation_latency
)
job_name = "test-job"
deployment_start_time = time.time()
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
archive=True,
synchronous=False,
)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) < transform_job_creation_latency
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
assert transform_job_description["TransformJobStatus"] == TransformJob.STATUS_IN_PROGRESS
@mock_sagemaker_aws_services
def test_deploy_in_throw_exception_after_transform_job_creation_fails(
pretrained_model, sagemaker_client
):
transform_job_creation_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_transform_job_update_latency(transform_job_creation_latency)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_transform_job_creations(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation is a transform job creation, create the transform job and
set its status to ``TransformJob.STATUS_FAILED``.
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "CreateTransformJob":
transform_job_name = operation_kwargs["TransformJobName"]
sagemaker_backend.set_transform_job_latest_operation(
transform_job_name=transform_job_name,
operation=TransformJobOperation.create_unsuccessful(
latency_seconds=transform_job_creation_latency
),
)
return result
with (
mock.patch("botocore.client.BaseClient._make_api_call", new=fail_transform_job_creations),
pytest.raises(MlflowException, match="batch transform job failed") as exc,
):
mfs.deploy_transform_job(
job_name="test-job",
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
)
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@mock_sagemaker_aws_services
def test_attempting_to_terminate_in_asynchronous_mode_without_archiving_throws_exception(
pretrained_model,
):
job_name = "test-job"
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
)
with pytest.raises(MlflowException, match="Resources must be archived") as exc:
mfs.terminate_transform_job(
job_name=job_name,
archive=False,
synchronous=False,
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@mock_sagemaker_aws_services
def test_terminate_in_sync_mode_waits_for_transform_job_termination_to_complete_before_returning(
pretrained_model, sagemaker_client
):
transform_job_termination_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_transform_job_update_latency(
transform_job_termination_latency
)
job_name = "test-job"
termination_start_time = time.time()
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
archive=True,
synchronous=True,
)
mfs.terminate_transform_job(job_name=job_name, synchronous=True)
termination_end_time = time.time()
assert (termination_end_time - termination_start_time) >= transform_job_termination_latency
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
assert transform_job_description["TransformJobStatus"] == TransformJob.STATUS_STOPPED
@mock_sagemaker_aws_services
def test_terminate_in_asynchronous_mode_returns_before_transform_job_termination_completes(
pretrained_model, sagemaker_client
):
transform_job_termination_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_transform_job_update_latency(
transform_job_termination_latency
)
job_name = "test-job"
termination_start_time = time.time()
mfs.deploy_transform_job(
job_name=job_name,
model_uri=pretrained_model.model_uri,
s3_input_data_type="Some Data Type",
s3_input_uri="Some Input Uri",
content_type="Some Content Type",
s3_output_path="Some Output Path",
archive=True,
synchronous=False,
)
mfs.terminate_transform_job(job_name=job_name, archive=True, synchronous=False)
termination_end_time = time.time()
assert (termination_end_time - termination_start_time) < transform_job_termination_latency
transform_job_description = sagemaker_client.describe_transform_job(TransformJobName=job_name)
assert transform_job_description["TransformJobStatus"] == TransformJob.STATUS_STOPPING
| TrainedModel |
python | tiangolo__fastapi | docs_src/dependencies/tutorial003_an_py310.py | {
"start": 176,
"end": 655
} | class ____:
def __init__(self, q: str | None = None, skip: int = 0, limit: int = 100):
self.q = q
self.skip = skip
self.limit = limit
@app.get("/items/")
async def read_items(commons: Annotated[Any, Depends(CommonQueryParams)]):
response = {}
if commons.q:
response.update({"q": commons.q})
items = fake_items_db[commons.skip : commons.skip + commons.limit]
response.update({"items": items})
return response
| CommonQueryParams |
python | openai__openai-python | src/openai/types/audio/transcription_text_segment_event.py | {
"start": 206,
"end": 681
} | class ____(BaseModel):
id: str
"""Unique identifier for the segment."""
end: float
"""End timestamp of the segment in seconds."""
speaker: str
"""Speaker label for this segment."""
start: float
"""Start timestamp of the segment in seconds."""
text: str
"""Transcript text for this segment."""
type: Literal["transcript.text.segment"]
"""The type of the event. Always `transcript.text.segment`."""
| TranscriptionTextSegmentEvent |
python | ansible__ansible | test/units/executor/module_common/test_module_common.py | {
"start": 4546,
"end": 7480
} | class ____:
ANSIBLE_MODULE_UTIL_STRINGS = (
# Absolute collection imports
b'import ansible_collections.my_ns.my_col.plugins.module_utils.my_util',
b'from ansible_collections.my_ns.my_col.plugins.module_utils import my_util',
b'from ansible_collections.my_ns.my_col.plugins.module_utils.my_util import my_func',
# Absolute core imports
b'import ansible.module_utils.basic',
b'from ansible.module_utils import basic',
b'from ansible.module_utils.basic import AnsibleModule',
# Relative imports
b'from ..module_utils import basic',
b'from .. module_utils import basic',
b'from ....module_utils import basic',
b'from ..module_utils.basic import AnsibleModule',
)
NOT_ANSIBLE_MODULE_UTIL_STRINGS = (
b'from ansible import release',
b'from ..release import __version__',
b'from .. import release',
b'from ansible.modules.system import ping',
b'from ansible_collecitons.my_ns.my_col.plugins.modules import function',
)
OFFSET = os.path.dirname(os.path.dirname(amc.__file__))
CORE_PATHS = (
('%s/modules/from_role.py' % OFFSET, 'ansible/modules/from_role'),
('%s/modules/system/ping.py' % OFFSET, 'ansible/modules/system/ping'),
('%s/modules/cloud/amazon/s3.py' % OFFSET, 'ansible/modules/cloud/amazon/s3'),
)
COLLECTION_PATHS = (
('/root/ansible_collections/ns/col/plugins/modules/ping.py',
'ansible_collections/ns/col/plugins/modules/ping'),
('/root/ansible_collections/ns/col/plugins/modules/subdir/ping.py',
'ansible_collections/ns/col/plugins/modules/subdir/ping'),
)
@pytest.mark.parametrize('testcase', ANSIBLE_MODULE_UTIL_STRINGS)
def test_detect_new_style_python_module_re(self, testcase):
assert amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase)
@pytest.mark.parametrize('testcase', NOT_ANSIBLE_MODULE_UTIL_STRINGS)
def test_no_detect_new_style_python_module_re(self, testcase):
assert not amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase)
@pytest.mark.parametrize('testcase, result', CORE_PATHS)
def test_detect_core_library_path_re(self, testcase, result):
assert amc.CORE_LIBRARY_PATH_RE.search(testcase).group('path') == result
@pytest.mark.parametrize('testcase', (p[0] for p in COLLECTION_PATHS))
def test_no_detect_core_library_path_re(self, testcase):
assert not amc.CORE_LIBRARY_PATH_RE.search(testcase)
@pytest.mark.parametrize('testcase, result', COLLECTION_PATHS)
def test_detect_collection_path_re(self, testcase, result):
assert amc.COLLECTION_PATH_RE.search(testcase).group('path') == result
@pytest.mark.parametrize('testcase', (p[0] for p in CORE_PATHS))
def test_no_detect_collection_path_re(self, testcase):
assert not amc.COLLECTION_PATH_RE.search(testcase)
| TestDetectionRegexes |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/base.py | {
"start": 1303,
"end": 49412
} | class ____(
QPlainTextEdit,
BaseEditMixin,
SpyderFontsMixin,
SpyderWidgetMixin
):
"""Text edit base widget"""
BRACE_MATCHING_SCOPE = ('sof', 'eof')
focus_in = Signal()
zoom_in = Signal()
zoom_out = Signal()
zoom_reset = Signal()
focus_changed = Signal()
sig_insert_completion = Signal(str)
sig_eol_chars_changed = Signal(str)
sig_prev_cursor = Signal()
sig_next_cursor = Signal()
def __init__(self, parent=None, class_parent=None):
QPlainTextEdit.__init__(self, parent)
BaseEditMixin.__init__(self)
SpyderWidgetMixin.__init__(self, class_parent=class_parent)
self.has_cell_separators = False
self.setAttribute(Qt.WA_DeleteOnClose)
self._restore_selection_pos = None
# Trailing newlines/spaces trimming
self.remove_trailing_spaces = False
self.remove_trailing_newlines = False
# Add a new line when saving
self.add_newline = False
# Code snippets
self.code_snippets = True
self.cursorPositionChanged.connect(self.cursor_position_changed)
self.indent_chars = " "*4
self.tab_stop_width_spaces = 4
# Code completion / calltips
if parent is not None:
mainwin = parent
while not isinstance(mainwin, QMainWindow):
mainwin = mainwin.parent()
if mainwin is None:
break
if mainwin is not None:
parent = mainwin
self.completion_widget = CompletionWidget(self, parent)
self.codecompletion_auto = False
self.setup_completion()
self.calltip_widget = CallTipWidget(self, hide_timer_on=False)
self.tooltip_widget = ToolTipWidget(self)
self.highlight_current_cell_enabled = False
# The color values may be overridden by the syntax highlighter
# Highlight current line color
self.currentline_color = QColor(
SpyderPalette.COLOR_ERROR_2).lighter(190)
self.currentcell_color = QColor(
SpyderPalette.COLOR_ERROR_2).lighter(194)
# Brace matching
self.bracepos = None
self.matched_p_color = QColor(SpyderPalette.COLOR_SUCCESS_1)
self.unmatched_p_color = QColor(SpyderPalette.COLOR_ERROR_2)
self.decorations = TextDecorationsManager(self)
# Save current cell. This is invalidated as soon as the text changes.
# Useful to avoid recomputing while scrolling.
self.current_cell = None
def reset_current_cell():
self.current_cell = None
self.highlight_current_cell()
self.textChanged.connect(reset_current_cell)
# Cache
self._current_cell_cursor = None
self._current_line_block = None
def setup_completion(self):
size = self.get_conf('completion/size', section='main')
font = self.get_font(SpyderFontType.Monospace)
self.completion_widget.setup_appearance(size, font)
def set_indent_chars(self, indent_chars):
self.indent_chars = indent_chars
def set_tab_stop_width_spaces(self, tab_stop_width_spaces):
self.tab_stop_width_spaces = tab_stop_width_spaces
self.update_tab_stop_width_spaces()
def set_remove_trailing_spaces(self, flag):
self.remove_trailing_spaces = flag
def set_add_newline(self, add_newline):
self.add_newline = add_newline
def set_remove_trailing_newlines(self, flag):
self.remove_trailing_newlines = flag
def update_tab_stop_width_spaces(self):
self.setTabStopWidth(self.fontMetrics().width(
' ' * self.tab_stop_width_spaces))
def set_palette(self, background, foreground):
"""
Set text editor palette colors:
background color and caret (text cursor) color
"""
# Because QtStylsheet overrides QPalette and because some style do not
# use the palette for all drawing (e.g. macOS styles), the background
# and foreground color of each TextEditBaseWidget instance must be set
# with a stylesheet extended with an ID Selector.
# Fixes spyder-ide/spyder#2028, spyder-ide/spyder#8069 and
# spyder-ide/spyder#9248.
if not self.objectName():
self.setObjectName(self.__class__.__name__ + str(id(self)))
style = "QPlainTextEdit#%s {background: %s; color: %s;}" % \
(self.objectName(), background.name(), foreground.name())
self.setStyleSheet(style)
# ---- Extra selections
def get_extra_selections(self, key):
"""Return editor extra selections.
Args:
key (str) name of the extra selections group
Returns:
list of sourcecode.api.TextDecoration.
"""
return self.decorations.get(key, [])
def set_extra_selections(self, key, extra_selections):
"""Set extra selections for a key.
Also assign draw orders to leave current_cell and current_line
in the background (and avoid them to cover other decorations)
NOTE: This will remove previous decorations added to the same key.
Args:
key (str) name of the extra selections group.
extra_selections (list of sourcecode.api.TextDecoration).
"""
# use draw orders to highlight current_cell and current_line first
draw_order = DRAW_ORDERS.get(key)
if draw_order is None:
draw_order = DRAW_ORDERS.get('on_top')
for selection in extra_selections:
selection.draw_order = draw_order
selection.kind = key
self.decorations.add_key(key, extra_selections)
self.update()
def clear_extra_selections(self, key):
"""Remove decorations added through set_extra_selections.
Args:
key (str) name of the extra selections group.
"""
self.decorations.remove_key(key)
self.update()
def get_visible_block_numbers(self):
"""Get the first and last visible block numbers."""
first = self.firstVisibleBlock().blockNumber()
bottom_right = QPoint(self.viewport().width() - 1,
self.viewport().height() - 1)
last = self.cursorForPosition(bottom_right).blockNumber()
return (first, last)
def get_buffer_block_numbers(self):
"""
Get the first and last block numbers of a region that covers
the visible one plus a buffer of half that region above and
below to make more fluid certain operations.
"""
first_visible, last_visible = self.get_visible_block_numbers()
buffer_height = round((last_visible - first_visible) / 2)
first = first_visible - buffer_height
first = 0 if first < 0 else first
last = last_visible + buffer_height
last = self.blockCount() if last > self.blockCount() else last
return (first, last)
# ------Highlight current line
def highlight_current_line(self):
"""Highlight current line"""
cursor = self.textCursor()
block = cursor.block()
if self._current_line_block == block:
return
self._current_line_block = block
selection = TextDecoration(cursor)
selection.format.setProperty(QTextFormat.FullWidthSelection,
to_qvariant(True))
selection.format.setBackground(self.currentline_color)
selection.cursor.clearSelection()
self.set_extra_selections('current_line', [selection])
def unhighlight_current_line(self):
"""Unhighlight current line"""
self._current_line_block = None
self.clear_extra_selections('current_line')
# ------Highlight current cell
def highlight_current_cell(self):
"""Highlight current cell"""
if (not self.has_cell_separators or
not self.highlight_current_cell_enabled):
self._current_cell_cursor = None
return
cursor, whole_file_selected = self.select_current_cell()
def same_selection(c1, c2):
if c1 is None or c2 is None:
return False
return (
c1.selectionStart() == c2.selectionStart() and
c1.selectionEnd() == c2.selectionEnd()
)
if same_selection(self._current_cell_cursor, cursor):
# Already correct
return
self._current_cell_cursor = cursor
selection = TextDecoration(cursor)
selection.format.setProperty(QTextFormat.FullWidthSelection,
to_qvariant(True))
selection.format.setBackground(self.currentcell_color)
if whole_file_selected:
self.clear_extra_selections('current_cell')
else:
self.set_extra_selections('current_cell', [selection])
def unhighlight_current_cell(self):
"""Unhighlight current cell"""
self._current_cell_cursor = None
self.clear_extra_selections('current_cell')
def in_comment(self, cursor=None, position=None):
"""Returns True if the given position is inside a comment.
Trivial default implementation. To be overridden by subclass.
This function is used to define the default behaviour of
self.find_brace_match.
"""
return False
def in_string(self, cursor=None, position=None):
"""Returns True if the given position is inside a string.
Trivial default implementation. To be overridden by subclass.
This function is used to define the default behaviour of
self.find_brace_match.
"""
return False
def find_brace_match(self, position, brace, forward,
ignore_brace=None, stop=None):
"""Returns position of matching brace.
Parameters
----------
position : int
The position of the brace to be matched.
brace : {'[', ']', '(', ')', '{', '}'}
The brace character to be matched.
[ <-> ], ( <-> ), { <-> }
forward : boolean
Whether to search forwards or backwards for a match.
ignore_brace : callable taking int returning boolean, optional
Whether to ignore a brace (as function of position).
stop : callable taking int returning boolean, optional
Whether to stop the search early (as function of position).
If both *ignore_brace* and *stop* are None, then brace matching
is handled differently depending on whether *position* is
inside a string, comment or regular code. If in regular code,
then any braces inside strings and comments are ignored. If in a
string/comment, then only braces in the same string/comment are
considered potential matches. The functions self.in_comment and
self.in_string are used to determine string/comment/code status
of characters in this case.
If exactly one of *ignore_brace* and *stop* is None, then it is
replaced by a function returning False for every position. I.e.:
lambda pos: False
Returns
-------
The position of the matching brace. If no matching brace
exists, then None is returned.
"""
if ignore_brace is None and stop is None:
if self.in_string(position=position):
# Only search inside the current string
def stop(pos):
return not self.in_string(position=pos)
elif self.in_comment(position=position):
# Only search inside the current comment
def stop(pos):
return not self.in_comment(position=pos)
else:
# Ignore braces inside strings and comments
def ignore_brace(pos):
return (self.in_string(position=pos) or
self.in_comment(position=pos))
# Deal with search range and direction
start_pos, end_pos = self.BRACE_MATCHING_SCOPE
if forward:
closing_brace = {'(': ')', '[': ']', '{': '}'}[brace]
text = self.get_text(position, end_pos, remove_newlines=False)
else:
# Handle backwards search with the same code as forwards
# by reversing the string to be searched.
closing_brace = {')': '(', ']': '[', '}': '{'}[brace]
text = self.get_text(start_pos, position+1, remove_newlines=False)
text = text[-1::-1] # reverse
def ind2pos(index):
"""Computes editor position from search index."""
return (position + index) if forward else (position - index)
# Search starts at the first position after the given one
# (which is assumed to contain a brace).
i_start_close = 1
i_start_open = 1
while True:
i_close = text.find(closing_brace, i_start_close)
i_start_close = i_close+1 # next potential start
if i_close == -1:
return # no matching brace exists
elif ignore_brace is None or not ignore_brace(ind2pos(i_close)):
while True:
i_open = text.find(brace, i_start_open, i_close)
i_start_open = i_open+1 # next potential start
if i_open == -1:
# found matching brace, but should we have
# stopped before this point?
if stop is not None:
# There's room for optimization here...
for i in range(1, i_close+1):
if stop(ind2pos(i)):
return
return ind2pos(i_close)
elif (ignore_brace is None or
not ignore_brace(ind2pos(i_open))):
break # must find new closing brace
def __highlight(self, positions, color=None, cancel=False):
if cancel:
self.clear_extra_selections('brace_matching')
return
extra_selections = []
for position in positions:
if position > self.get_position('eof'):
return
selection = TextDecoration(self.textCursor())
selection.format.setBackground(color)
selection.cursor.clearSelection()
selection.cursor.setPosition(position)
selection.cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor)
extra_selections.append(selection)
self.set_extra_selections('brace_matching', extra_selections)
def cursor_position_changed(self):
"""Handle brace matching."""
# Clear last brace highlight (if any)
if self.bracepos is not None:
self.__highlight(self.bracepos, cancel=True)
self.bracepos = None
# Get the current cursor position, check if it is at a brace,
# and, if so, determine the direction in which to search for able
# matching brace.
cursor = self.textCursor()
if cursor.position() == 0:
return
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor)
text = str(cursor.selectedText())
if text in (')', ']', '}'):
forward = False
elif text in ('(', '[', '{'):
forward = True
else:
return
pos1 = cursor.position()
pos2 = self.find_brace_match(pos1, text, forward=forward)
# Set a new brace highlight
if pos2 is not None:
self.bracepos = (pos1, pos2)
self.__highlight(self.bracepos, color=self.matched_p_color)
else:
self.bracepos = (pos1,)
self.__highlight(self.bracepos, color=self.unmatched_p_color)
# -----Widget setup and options
def set_wrap_mode(self, mode=None):
"""
Set wrap mode
Valid *mode* values: None, 'word', 'character'
"""
if mode == 'word':
wrap_mode = QTextOption.WrapAtWordBoundaryOrAnywhere
elif mode == 'character':
wrap_mode = QTextOption.WrapAnywhere
else:
wrap_mode = QTextOption.NoWrap
self.setWordWrapMode(wrap_mode)
# ------Reimplementing Qt methods
@Slot()
def copy(self):
"""
Reimplement Qt method
Copy text to clipboard with correct EOL chars
"""
if self.get_selected_text():
QApplication.clipboard().setText(self.get_selected_text())
else:
cursor = self.select_current_line_and_sep(set_cursor=False)
QApplication.clipboard().setText(self.get_selected_text(cursor))
def toPlainText(self):
"""
Reimplement Qt method
Fix PyQt4 bug on Windows and Python 3
"""
# Fix what appears to be a PyQt4 bug when getting file
# contents under Windows and PY3. This bug leads to
# corruptions when saving files with certain combinations
# of unicode chars on them (like the one attached on
# spyder-ide/spyder#1546).
if os.name == 'nt':
text = self.get_text('sof', 'eof')
return text.replace('\u2028', '\n').replace('\u2029', '\n')\
.replace('\u0085', '\n')
return super().toPlainText()
def keyPressEvent(self, event):
key = event.key()
ctrl = event.modifiers() & Qt.ControlModifier
meta = event.modifiers() & Qt.MetaModifier
# Use our own copy method for {Ctrl,Cmd}+C to avoid Qt
# copying text in HTML. See spyder-ide/spyder#2285.
if (ctrl or meta) and key == Qt.Key_C:
self.copy()
else:
super().keyPressEvent(event)
# ------Text: get, set, ...
def get_cell_list(self):
"""Get all cells."""
# Reimplemented in childrens
return []
def get_selection_as_executable_code(self, cursor=None):
"""
Get selected text in a way that allows other plugins to execute it.
"""
ls = self.get_line_separator()
_indent = lambda line: len(line)-len(line.lstrip())
line_from, line_to = self.get_selection_bounds(cursor)
line_col_from, line_col_to = self.get_selection_start_end(cursor)
line_from_off, line_to_off = self.get_selection_offsets(cursor)
text = self.get_selected_text(cursor)
if not text:
return
lines = text.split(ls)
if len(lines) > 1:
# Multiline selection -> eventually fixing indentation
original_indent = _indent(self.get_text_line(line_from))
text = (" " * (original_indent - _indent(lines[0]))) + text
# If there is a common indent to all lines, find it.
# Moving from bottom line to top line ensures that blank
# lines inherit the indent of the line *below* it,
# which is the desired behavior.
min_indent = 999
current_indent = 0
lines = text.split(ls)
for i in range(len(lines) - 1, -1, -1):
line = lines[i]
if line.strip():
current_indent = _indent(line)
min_indent = min(current_indent, min_indent)
else:
lines[i] = ' ' * current_indent
if min_indent:
lines = [line[min_indent:] for line in lines]
# Remove any leading whitespace or comment lines
# since they confuse the reserved word detector that follows below
lines_removed = 0
while lines:
first_line = lines[0].lstrip()
if first_line == '' or first_line[0] == '#':
lines_removed += 1
lines.pop(0)
else:
break
# Add removed lines back to have correct traceback line numbers
leading_lines_str = ls * lines_removed
return (
leading_lines_str + ls.join(lines),
(line_from_off, line_to_off),
(line_col_from, line_col_to)
)
def get_cell_as_executable_code(self, cursor=None):
"""Return cell contents as executable code."""
if cursor is None:
cursor = self.textCursor()
ls = self.get_line_separator()
cursor, __ = self.select_current_cell(cursor)
line_from, __ = self.get_selection_bounds(cursor)
# Get the block for the first cell line
start = cursor.selectionStart()
block = self.document().findBlock(start)
if not is_cell_header(block) and start > 0:
block = self.document().findBlock(start - 1)
# Get text
text, off_pos, col_pos = self.get_selection_as_executable_code(cursor)
if text is not None:
text = ls * line_from + text
return text, block, off_pos, col_pos
def select_current_cell(self, cursor=None):
"""
Select cell under cursor in the visible portion of the file
cell = group of lines separated by CELL_SEPARATORS
returns
-the textCursor
-a boolean indicating if the entire file is selected
"""
if cursor is None:
cursor = self.textCursor()
if self.current_cell:
current_cell, cell_full_file = self.current_cell
cell_start_pos = current_cell.selectionStart()
cell_end_position = current_cell.selectionEnd()
# Check if the saved current cell is still valid
if cell_start_pos <= cursor.position() < cell_end_position:
return current_cell, cell_full_file
else:
self.current_cell = None
block = cursor.block()
try:
if is_cell_header(block):
header = block.userData().oedata
else:
header = next(document_cells(
block, forward=False,
cell_list=self.get_cell_list()))
cell_start_pos = header.block.position()
cell_at_file_start = False
cursor.setPosition(cell_start_pos)
except StopIteration:
# This cell has no header, so it is the first cell.
cell_at_file_start = True
cursor.movePosition(QTextCursor.Start)
try:
footer = next(document_cells(
block, forward=True,
cell_list=self.get_cell_list()))
cell_end_position = footer.block.position()
cell_at_file_end = False
cursor.setPosition(cell_end_position, QTextCursor.KeepAnchor)
except StopIteration:
# This cell has no next header, so it is the last cell.
cell_at_file_end = True
cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor)
cell_full_file = cell_at_file_start and cell_at_file_end
self.current_cell = (cursor, cell_full_file)
return cursor, cell_full_file
def select_current_line_and_sep(self, cursor=None, set_cursor=True):
"""
Selects the current line, including the correct line separator to
delete or copy the whole current line.
This means:
- If there is a next block, select the current block's newline char.
- Else if there is a previous block, select the previous newline char.
- Else select no newline char (1-line file)
Does a similar thing to `cursor.select(QTextCursor.BlockUnderCursor)`,
which always selects the previous newline char.
"""
if cursor is None:
cursor = self.textCursor()
cursor.movePosition(QTextCursor.StartOfBlock, QTextCursor.MoveAnchor)
if not cursor.movePosition(QTextCursor.NextBlock,
QTextCursor.KeepAnchor):
# if there is no next Block, we select the previous newline
if cursor.movePosition(QTextCursor.PreviousBlock,
QTextCursor.MoveAnchor):
cursor.movePosition(QTextCursor.EndOfBlock,
QTextCursor.MoveAnchor)
cursor.movePosition(QTextCursor.NextBlock,
QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.EndOfBlock,
QTextCursor.KeepAnchor)
else:
# if there is no previous block, we can select the current line
# this is the 1-line file case
cursor.select(QTextCursor.BlockUnderCursor)
if set_cursor:
self.setTextCursor(cursor)
return cursor
def go_to_next_cell(self):
"""Go to the next cell of lines"""
cursor = self.textCursor()
block = cursor.block()
try:
footer = next(document_cells(
block, forward=True,
cell_list=self.get_cell_list()))
cursor.setPosition(footer.block.position())
except StopIteration:
return
self.setTextCursor(cursor)
def go_to_previous_cell(self):
"""Go to the previous cell of lines"""
cursor = self.textCursor()
block = cursor.block()
if is_cell_header(block):
block = block.previous()
try:
header = next(document_cells(
block, forward=False,
cell_list=self.get_cell_list()))
cursor.setPosition(header.block.position())
except StopIteration:
return
self.setTextCursor(cursor)
def get_line_count(self):
"""Return document total line number"""
return self.blockCount()
def paintEvent(self, e):
"""
Override Qt method to restore text selection after text gets inserted
at the current position of the cursor.
See spyder-ide/spyder#11089 for more info.
"""
if self._restore_selection_pos is not None:
self.__restore_selection(*self._restore_selection_pos)
self._restore_selection_pos = None
super().paintEvent(e)
def __save_selection(self):
"""Save current cursor selection and return position bounds"""
cursor = self.textCursor()
return cursor.selectionStart(), cursor.selectionEnd()
def __restore_selection(self, start_pos, end_pos):
"""Restore cursor selection from position bounds"""
cursor = self.textCursor()
cursor.setPosition(start_pos)
cursor.setPosition(end_pos, QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
def __duplicate_line_or_selection(self, after_current_line=True):
"""Duplicate current line or selected text"""
cursor = self.textCursor()
cursor.beginEditBlock()
cur_pos = cursor.position()
start_pos, end_pos = self.__save_selection()
end_pos_orig = end_pos
if str(cursor.selectedText()):
cursor.setPosition(end_pos)
# Check if end_pos is at the start of a block: if so, starting
# changes from the previous block
cursor.movePosition(QTextCursor.StartOfBlock,
QTextCursor.KeepAnchor)
if not str(cursor.selectedText()):
cursor.movePosition(QTextCursor.PreviousBlock)
end_pos = cursor.position()
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
while cursor.position() <= end_pos:
cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
if cursor.atEnd():
cursor_temp = QTextCursor(cursor)
cursor_temp.clearSelection()
cursor_temp.insertText(self.get_line_separator())
break
cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
text = cursor.selectedText()
cursor.clearSelection()
if not after_current_line:
# Moving cursor before current line/selected text
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
start_pos += len(text)
end_pos_orig += len(text)
cur_pos += len(text)
# We save the end and start position of the selection, so that it
# can be restored within the paint event that is triggered by the
# text insertion. This is done to prevent a graphical glitch that
# occurs when text gets inserted at the current position of the cursor.
# See spyder-ide/spyder#11089 for more info.
if cur_pos == start_pos:
self._restore_selection_pos = (end_pos_orig, start_pos)
else:
self._restore_selection_pos = (start_pos, end_pos_orig)
cursor.insertText(text)
cursor.endEditBlock()
def duplicate_line_down(self):
"""
Copy current line or selected text and paste the duplicated text
*after* the current line or selected text.
"""
self.__duplicate_line_or_selection(after_current_line=False)
def duplicate_line_up(self):
"""
Copy current line or selected text and paste the duplicated text
*before* the current line or selected text.
"""
self.__duplicate_line_or_selection(after_current_line=True)
def move_line_or_selection(self, after_current_line=True):
"""Move current line or selected text"""
cursor = self.textCursor()
cursor.beginEditBlock()
start_pos, end_pos = self.__save_selection()
last_line = False
# ------ Select text
# Get selection start location
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
start_pos = cursor.position()
# Get selection end location
cursor.setPosition(end_pos)
if not cursor.atBlockStart() or end_pos == start_pos:
cursor.movePosition(QTextCursor.EndOfBlock)
cursor.movePosition(QTextCursor.NextBlock)
end_pos = cursor.position()
# Check if selection ends on the last line of the document
if cursor.atEnd():
if not cursor.atBlockStart() or end_pos == start_pos:
last_line = True
# ------ Stop if at document boundary
cursor.setPosition(start_pos)
if cursor.atStart() and not after_current_line:
# Stop if selection is already at top of the file while moving up
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
return
cursor.setPosition(end_pos, QTextCursor.KeepAnchor)
if last_line and after_current_line:
# Stop if selection is already at end of the file while moving down
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
return
# ------ Move text
sel_text = str(cursor.selectedText())
cursor.removeSelectedText()
if after_current_line:
# Shift selection down
text = str(cursor.block().text())
sel_text = os.linesep + sel_text[0:-1] # Move linesep at the start
cursor.movePosition(QTextCursor.EndOfBlock)
start_pos += len(text)+1
end_pos += len(text)
if not cursor.atEnd():
end_pos += 1
else:
# Shift selection up
if last_line:
# Remove the last linesep and add it to the selected text
cursor.deletePreviousChar()
sel_text = sel_text + os.linesep
cursor.movePosition(QTextCursor.StartOfBlock)
end_pos += 1
else:
cursor.movePosition(QTextCursor.PreviousBlock)
text = str(cursor.block().text())
start_pos -= len(text)+1
end_pos -= len(text)+1
cursor.insertText(sel_text)
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
def go_to_new_line(self):
"""Go to the end of the current line and create a new line"""
self.stdkey_end(False, False)
self.insert_text(self.get_line_separator())
def extend_selection_to_complete_lines(self):
"""Extend current selection to complete lines"""
cursor = self.textCursor()
start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
cursor.setPosition(start_pos)
cursor.setPosition(end_pos, QTextCursor.KeepAnchor)
if cursor.atBlockStart():
cursor.movePosition(QTextCursor.PreviousBlock,
QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.EndOfBlock,
QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
def set_selection(self, start, end):
cursor = self.textCursor()
cursor.setPosition(start)
cursor.setPosition(end, QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
def truncate_selection(self, position_from):
"""Unselect read-only parts in shell, like prompt"""
position_from = self.get_position(position_from)
cursor = self.textCursor()
start, end = cursor.selectionStart(), cursor.selectionEnd()
if start < end:
start = max([position_from, start])
else:
end = max([position_from, end])
self.set_selection(start, end)
def restrict_cursor_position(self, position_from, position_to):
"""In shell, avoid editing text except between prompt and EOF"""
position_from = self.get_position(position_from)
position_to = self.get_position(position_to)
cursor = self.textCursor()
cursor_position = cursor.position()
if cursor_position < position_from or cursor_position > position_to:
self.set_cursor_position(position_to)
# ------Code completion / Calltips
def select_completion_list(self):
"""Completion list is active, Enter was just pressed"""
self.completion_widget.item_selected()
def insert_completion(self, completion, completion_position):
"""
Insert a completion into the editor.
completion_position is where the completion was generated.
The replacement range is computed using the (LSP) completion's
textEdit field if it exists. Otherwise, we replace from the
start of the word under the cursor.
"""
if not completion:
return
cursor = self.textCursor()
has_selected_text = self.has_selected_text()
selection_start, selection_end = self.get_selection_start_end()
if isinstance(completion, dict) and 'textEdit' in completion:
completion_range = completion['textEdit']['range']
start = completion_range['start']
end = completion_range['end']
if isinstance(completion_range['start'], dict):
start_line, start_col = start['line'], start['character']
start = self.get_position_line_number(start_line, start_col)
if isinstance(completion_range['start'], dict):
end_line, end_col = end['line'], end['character']
end = self.get_position_line_number(end_line, end_col)
cursor.setPosition(start)
cursor.setPosition(end, QTextCursor.KeepAnchor)
text = str(completion['textEdit']['newText'])
else:
text = completion
kind = None
if isinstance(completion, dict):
text = completion['insertText']
kind = completion['kind']
text = str(text)
# Get word to the left of the cursor.
result = self.get_current_word_and_position(
completion=True, valid_python_variable=False)
if result is not None:
current_text, start_position = result
end_position = start_position + len(current_text)
# Remove text under cursor only if it's not an autocompletion
# character
is_auto_completion_character = False
if self.objectName() == 'console':
if current_text == '.':
is_auto_completion_character = True
else:
if (
kind != CompletionItemKind.FILE and
current_text in self.auto_completion_characters
):
is_auto_completion_character = True
# Adjustments for file completions
if kind == CompletionItemKind.FILE:
special_chars = ['"', "'", '/', '\\']
if any(
[current_text.endswith(c) for c in special_chars]
):
# This is necessary when completions are requested next
# to special characters.
start_position = end_position
elif current_text.endswith('.') and len(current_text) > 1:
# This inserts completions for files or directories
# that start with a dot
start_position = end_position - 1
elif current_text == '.':
# This is needed if users are asking for file
# completions to the right of a dot when some of its
# name is part of the completed text
cursor_1 = self.textCursor()
found_start = False
# Select text backwards until we find where the file
# name starts
while not found_start:
cursor_1.movePosition(
QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor,
)
selection = str(cursor_1.selectedText())
if text.startswith(selection):
found_start = True
current_text = str(cursor_1.selectedText())
start_position = cursor_1.selectionStart()
end_position = cursor_1.selectionEnd()
if not is_auto_completion_character:
# Check if the completion position is in the expected range
if not (
start_position <= completion_position <= end_position
):
return
cursor.setPosition(start_position)
# Remove the word under the cursor
cursor.setPosition(end_position, QTextCursor.KeepAnchor)
else:
# Check if we are in the correct position
if cursor.position() != completion_position:
return
else:
# Check if we are in the correct position
if cursor.position() != completion_position:
return
if has_selected_text:
self.sig_will_remove_selection.emit(selection_start, selection_end)
cursor.removeSelectedText()
self.setTextCursor(cursor)
# Add text
if self.objectName() == 'console':
# Handle completions for the internal console
self.insert_text(text)
else:
self.sig_insert_completion.emit(text)
def is_completion_widget_visible(self):
"""Return True is completion list widget is visible"""
try:
return self.completion_widget.isVisible()
except RuntimeError:
# This is to avoid a RuntimeError exception when the widget is
# already been deleted. See spyder-ide/spyder#13248.
return False
def hide_completion_widget(self, focus_to_parent=True):
"""Hide completion widget and tooltip."""
# This is necessary to catch an error when creating new editor windows.
# Fixes spyder-ide/spyder#19109
try:
self.completion_widget.hide(focus_to_parent=focus_to_parent)
except RuntimeError:
pass
QToolTip.hideText()
# ------Standard keys
def stdkey_clear(self):
if not self.has_selected_text():
self.moveCursor(QTextCursor.NextCharacter, QTextCursor.KeepAnchor)
self.remove_selected_text()
def stdkey_backspace(self):
if not self.has_selected_text():
self.moveCursor(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor)
self.remove_selected_text()
def __get_move_mode(self, shift):
return QTextCursor.KeepAnchor if shift else QTextCursor.MoveAnchor
def stdkey_up(self, shift):
self.moveCursor(QTextCursor.Up, self.__get_move_mode(shift))
def stdkey_down(self, shift):
self.moveCursor(QTextCursor.Down, self.__get_move_mode(shift))
def stdkey_tab(self):
self.insert_text(self.indent_chars)
def stdkey_home(self, shift, ctrl, prompt_pos=None):
"""Smart HOME feature: cursor is first moved at
indentation position, then at the start of the line"""
move_mode = self.__get_move_mode(shift)
if ctrl:
self.moveCursor(QTextCursor.Start, move_mode)
else:
cursor = self.textCursor()
if prompt_pos is None:
start_position = self.get_position('sol')
else:
start_position = self.get_position(prompt_pos)
text = self.get_text(start_position, 'eol')
indent_pos = start_position+len(text)-len(text.lstrip())
if cursor.position() != indent_pos:
cursor.setPosition(indent_pos, move_mode)
else:
cursor.setPosition(start_position, move_mode)
self.setTextCursor(cursor)
def stdkey_end(self, shift, ctrl):
move_mode = self.__get_move_mode(shift)
if ctrl:
self.moveCursor(QTextCursor.End, move_mode)
else:
self.moveCursor(QTextCursor.EndOfBlock, move_mode)
# ----Qt Events
def mousePressEvent(self, event):
"""Reimplement Qt method"""
# mouse buttons for forward and backward navigation
if event.button() == Qt.XButton1:
self.sig_prev_cursor.emit()
elif event.button() == Qt.XButton2:
self.sig_next_cursor.emit()
if sys.platform.startswith('linux') and event.button() == Qt.MidButton:
self.calltip_widget.hide()
self.setFocus()
event = QMouseEvent(QEvent.MouseButtonPress, event.position(),
Qt.LeftButton, Qt.LeftButton, Qt.NoModifier)
QPlainTextEdit.mousePressEvent(self, event)
QPlainTextEdit.mouseReleaseEvent(self, event)
# Send selection text to clipboard to be able to use
# the paste method and avoid the strange spyder-ide/spyder#1445.
# NOTE: This issue seems a focusing problem but it
# seems really hard to track
mode_clip = QClipboard.Clipboard
mode_sel = QClipboard.Selection
text_clip = QApplication.clipboard().text(mode=mode_clip)
text_sel = QApplication.clipboard().text(mode=mode_sel)
QApplication.clipboard().setText(text_sel, mode=mode_clip)
self.paste()
QApplication.clipboard().setText(text_clip, mode=mode_clip)
else:
self.calltip_widget.hide()
QPlainTextEdit.mousePressEvent(self, event)
def focusInEvent(self, event):
"""Reimplemented to handle focus"""
self.focus_changed.emit()
self.focus_in.emit()
QPlainTextEdit.focusInEvent(self, event)
def focusOutEvent(self, event):
"""Reimplemented to handle focus"""
self.focus_changed.emit()
QPlainTextEdit.focusOutEvent(self, event)
def wheelEvent(self, event):
"""Reimplemented to emit zoom in/out signals when Ctrl is pressed"""
# This feature is disabled on MacOS, see spyder-ide/spyder#1510.
if (
sys.platform != 'darwin'
and not self.get_conf('disable_zoom_mouse', section='main')
):
if event.modifiers() & Qt.ControlModifier:
if hasattr(event, 'angleDelta'):
if event.angleDelta().y() < 0:
self.zoom_out.emit()
elif event.angleDelta().y() > 0:
self.zoom_in.emit()
elif hasattr(event, 'delta'):
if event.delta() < 0:
self.zoom_out.emit()
elif event.delta() > 0:
self.zoom_in.emit()
return
QPlainTextEdit.wheelEvent(self, event)
# Needed to prevent stealing focus when scrolling.
# If the current widget with focus is the CompletionWidget, it means
# it's being displayed in the editor, so we need to hide it and give
# focus back to the editor. If not, we need to leave the focus in
# the widget that currently has it.
# See spyder-ide/spyder#11502
current_widget = QApplication.focusWidget()
if isinstance(current_widget, CompletionWidget):
self.hide_completion_widget(focus_to_parent=True)
else:
self.hide_completion_widget(focus_to_parent=False)
def position_widget_at_cursor(self, widget):
# Retrieve current screen height
srect = self.screen().availableGeometry()
left, top, right, bottom = (srect.left(), srect.top(),
srect.right(), srect.bottom())
ancestor = widget.parent()
if ancestor:
left = max(left, ancestor.x())
top = max(top, ancestor.y())
right = min(right, ancestor.x() + ancestor.width())
bottom = min(bottom, ancestor.y() + ancestor.height())
point = self.cursorRect().bottomRight()
point = self.calculate_real_position(point)
point = self.mapToGlobal(point)
# Move to left of cursor if not enough space on right
widget_right = point.x() + widget.width()
if widget_right > right:
point.setX(point.x() - widget.width())
# Push to right if not enough space on left
if point.x() < left:
point.setX(left)
# Moving widget above if there is not enough space below
widget_bottom = point.y() + widget.height()
x_position = point.x()
if widget_bottom > bottom:
point = self.cursorRect().topRight()
point = self.mapToGlobal(point)
point.setX(x_position)
point.setY(point.y() - widget.height())
delta_y = -2
else:
delta_y = 5 if sys.platform == "darwin" else 6
# Add small delta to the vertical position so that the widget is not
# shown too close to the text
point.setY(point.y() + delta_y)
if ancestor is not None:
# Useful only if we set parent to 'ancestor' in __init__
point = ancestor.mapFromGlobal(point)
widget.move(point)
def calculate_real_position(self, point):
return point
| TextEditBaseWidget |
python | pytorch__pytorch | test/inductor/test_pad_mm.py | {
"start": 674,
"end": 24018
} | class ____(TestCase):
def setUp(self):
super().setUp()
if not is_big_gpu():
return self.skipTest("Need a big GPU to run max_autotune=True")
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_mm_dyn_m(self):
M = 40
K1 = 581
K2 = 49
N = 30
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = rand_strided(
(K2, N), (1, K2), device=GPU_TYPE, dtype=torch.float32
)
def forward(self, a):
a1 = torch.narrow(a, 1, 0, K2)
return torch.mm(a1, self.w)
fn = Model().to(GPU_TYPE)
a = rand_strided((M, K1), (K1, 1), device=GPU_TYPE, dtype=torch.float32)
aligned_k = get_padded_length(K2, get_alignment_size(a)) + K2
torch._dynamo.mark_dynamic(a, 0)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a)
FileCheck().check(f"K = {aligned_k}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_cat_pad_mm_dyn_m(self):
M1 = 128
M2 = 40
K1 = 129
K2 = 111
N = 100
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = rand_strided(
(K2, N), (1, K2), device=GPU_TYPE, dtype=torch.float32
)
def forward(self, a, b):
c = torch.cat([a, b], dim=0)
a1 = torch.narrow(c, 1, 0, K2)
return torch.mm(a1, self.w)
fn = Model().to(GPU_TYPE)
a = rand_strided((M1, K1), (K1, 1), device=GPU_TYPE, dtype=torch.float32)
b = rand_strided((M2, K1), (K1, 1), device=GPU_TYPE, dtype=torch.float32)
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
aligned_k = get_padded_length(K2, get_alignment_size(a)) + K2
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b)
FileCheck().check(f"K = {aligned_k}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_mm_dyn_n(self):
M = 20
K = 81
N = 30
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.mm(a, b)
fn = Model().to(GPU_TYPE)
a = rand_strided((M, K), (K, 1), device=GPU_TYPE, dtype=torch.float32)
b = rand_strided((K, N), (1, K), device=GPU_TYPE, dtype=torch.float32)
aligned_k = get_padded_length(K, get_alignment_size(a)) + K
torch._dynamo.mark_dynamic(b, 1)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b)
FileCheck().check(f"K = {aligned_k}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_mm_dyn_k(self):
M = 21
K = 80
N = 30
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.mm(a, b)
fn = Model().to(GPU_TYPE)
a = rand_strided((M, K), (K, 1), device=GPU_TYPE, dtype=torch.float32)
b = rand_strided((K, N), (1, K), device=GPU_TYPE, dtype=torch.float32)
# TODO: Getting the alignment right requires pattern matcher to
# run on newly added nodes
aligned_m = get_padded_length(M, get_alignment_size(a)) + M
torch._dynamo.mark_dynamic(a, 1)
torch._dynamo.mark_dynamic(b, 0)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b)
FileCheck().check(f"M = {aligned_m}").run(code)
self.assertEqual(res1, res2)
def test_pad_mm_dyn_mnk(self):
M = 20
K = 81
N = 30
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.mm(a, b)
fn = Model().to(GPU_TYPE)
a = rand_strided((M, K), (K, 1), device=GPU_TYPE, dtype=torch.float32)
b = rand_strided((K, N), (1, K), device=GPU_TYPE, dtype=torch.float32)
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(a, 1)
torch._dynamo.mark_dynamic(b, 0)
torch._dynamo.mark_dynamic(b, 1)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (_,) = run_and_get_code(compiled_fn, a, b)
self.assertEqual(res1, res2)
@inductor_config.patch(force_shape_pad=True)
def test_zero_dim(self):
def addmm(x, a, b):
return torch.addmm(x, a, b)
x = torch.randn(100).to(GPU_TYPE)
a = torch.randn(0, 10).to(GPU_TYPE)
b = torch.randn(10, 100).to(GPU_TYPE)
self.assertEqual(torch.compile(addmm)(x, a, b), addmm(x, a, b))
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_bmm_dyn_b(self):
B = 10
M = 128
K = 33
N = 40
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.bmm(a, b)
fn = Model().to(GPU_TYPE)
a = torch.randn(B, M, K, device=GPU_TYPE, dtype=torch.float32)
b = torch.randn(B, K, N, device=GPU_TYPE, dtype=torch.float32)
aligned_k = get_padded_length(K, get_alignment_size(a)) + K
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b)
FileCheck().check(f"K = {aligned_k}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_bmm_dyn_k(self):
B = 10
M = 128
K = 40
N = 41
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.bmm(a, b)
fn = Model().to(GPU_TYPE)
a = torch.randn(B, M, K, device=GPU_TYPE, dtype=torch.float32)
b = torch.randn(B, K, N, device=GPU_TYPE, dtype=torch.float32)
aligned_n = get_padded_length(N, get_alignment_size(b)) + N
torch._dynamo.mark_dynamic(a, 2)
torch._dynamo.mark_dynamic(b, 1)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b)
FileCheck().check(f"N = {aligned_n}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_bmm_dyn_bm(self):
B = 10
M = 128
K = 40
N = 41
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.bmm(a, b)
fn = Model().to(GPU_TYPE)
a = torch.randn(B, M, K, device=GPU_TYPE, dtype=torch.float32)
b = torch.randn(B, K, N, device=GPU_TYPE, dtype=torch.float32)
aligned_n = get_padded_length(N, get_alignment_size(b)) + N
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(a, 1)
torch._dynamo.mark_dynamic(b, 0)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b)
FileCheck().check(f"N = {aligned_n}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_addmm_dyn_m(self):
M = 128
K = 33
N = 40
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b, c):
return torch.addmm(a, b, c)
fn = Model().to(GPU_TYPE)
a = torch.randn(M, N, device=GPU_TYPE, dtype=torch.float32)
b = torch.randn(M, K, device=GPU_TYPE, dtype=torch.float32)
c = torch.randn(K, N, device=GPU_TYPE, dtype=torch.float32)
aligned_k = get_padded_length(K, get_alignment_size(b)) + K
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b, c)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b, c)
FileCheck().check(f"K = {aligned_k}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_pad_addmm_dyn_mn(self):
M = 128
K = 33
N = 40
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b, c):
return torch.addmm(a, b, c)
fn = Model().to(GPU_TYPE)
a = torch.randn(M, N, device=GPU_TYPE, dtype=torch.float32)
b = torch.randn(M, K, device=GPU_TYPE, dtype=torch.float32)
c = torch.randn(K, N, device=GPU_TYPE, dtype=torch.float32)
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(a, 1)
torch._dynamo.mark_dynamic(b, 0)
torch._dynamo.mark_dynamic(c, 1)
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
res1 = fn(a, b, c)
compiled_fn = torch.compile(fn)
res2, (code,) = run_and_get_code(compiled_fn, a, b, c)
# no padding
FileCheck().check(f"K = {K}").run(code)
self.assertEqual(res1, res2)
@inductor_config.patch(force_shape_pad=True)
def test_pad_single_cat(self):
@torch.compile()
def foo(x, y):
return x @ y
inps = [torch.rand([5, 5], device=GPU_TYPE) for _ in range(2)]
out = foo(*inps)
self.assertEqual(out, inps[0] @ inps[1])
@inductor_config.patch(force_shape_pad=True)
@fresh_cache()
def test_pad_addmm_2d_bias(self):
@torch.compile()
def foo(input, x, y):
return torch.ops.aten.addmm(input, x, y)
for a in [1, 4]:
for b in [1, 6]:
inps = (
torch.rand([a, b], device=GPU_TYPE),
torch.rand([4, 5], device=GPU_TYPE),
torch.rand([5, 6], device=GPU_TYPE),
)
out = foo(*inps)
out_eager = torch.ops.aten.addmm(*inps)
self.assertEqual(out, out_eager)
for a in [1, 6]:
inps = (
torch.rand([a], device=GPU_TYPE),
torch.rand([4, 5], device=GPU_TYPE),
torch.rand([5, 6], device=GPU_TYPE),
)
out = foo(*inps)
out_eager = torch.ops.aten.addmm(*inps)
self.assertEqual(out, out_eager)
@inductor_config.patch(force_shape_pad=True)
def test_pad_batch(self):
m = 6
n = 9
k = 11
batch_size = 3
mat1 = torch.ones((batch_size, m, k), device=GPU_TYPE, dtype=torch.float16)
mat2 = torch.ones((batch_size, k, n), device=GPU_TYPE, dtype=torch.float16)
expected_alignment = get_alignment_size(mat1)
assert expected_alignment == 8, "Alignment for float16 should be 8"
assert should_pad_common(mat1, mat2), (
"This should pass the common padding criteria"
)
@torch.compile()
def bmm(mat1, mat2):
return torch.bmm(mat1, mat2)
res2, (code,) = run_and_get_code(bmm, mat1, mat2)
bmm_expected_result = torch.bmm(mat1, mat2)
# in call code, expect to see a single pad per input, and then we should see padded allocation for output
FileCheck().check("del async_compile").check_count(
".run(", 2, exactly=True
).check(f"empty_strided_{GPU_TYPE}((3, 8, 16)").run(code)
assert torch.allclose(res2, bmm_expected_result), (
"BMM results are not identical"
)
@fresh_cache()
def test_exclude_padding(self):
@torch.compile()
def mm(a, b):
return a @ b
mm(torch.rand([25, 25], device=GPU_TYPE), torch.rand([25, 25], device=GPU_TYPE))
local_cache = get_pad_cache().get_local_cache()
self.assertTrue(len(local_cache) == 2)
FileCheck().check_count("exclude_pad:False", 2, exactly=True).run(
repr(local_cache)
)
@torch.compile()
def mm(a, b):
return (a + 1) @ b
mm(torch.rand([25, 25], device=GPU_TYPE), torch.rand([25, 25], device=GPU_TYPE))
local_cache = get_pad_cache().get_local_cache()
# reuse original base timing
self.assertTrue(len(local_cache) == 3)
FileCheck().check_count("exclude_pad:False", 3, exactly=True).run(
repr(local_cache)
)
FileCheck().check_count("exclude_pad:True", 1, exactly=True).run(
repr(local_cache)
)
@fresh_cache()
@inductor_config.patch(max_pointwise_cat_inputs=2)
def test_exclude_cat_padding(self):
@torch.compile()
def mm(inps, b):
return torch.cat(inps) @ b
inp = torch.rand([2046, 2046], device=GPU_TYPE)
inp2 = torch.rand([2046, 2046], device=GPU_TYPE)
inps = inp.chunk(3)
mm(inps, inp2)
FileCheck().check_count("exclude_pad:False", 2, exactly=True).run(
repr(get_pad_cache().get_local_cache())
)
inps = inp.chunk(2)
mm(inps, inp2)
FileCheck().check_count("exclude_pad:False", 3, exactly=True).run(
repr(get_pad_cache().get_local_cache())
)
@unittest.skipIf(
(not torch.cuda.is_available() or torch.cuda.get_device_capability() >= (9, 0))
and (not torch.xpu.is_available()),
"No perf regression on H100+ with BF16",
)
@skipIfRocm
@fresh_cache()
@inductor_config.patch(
post_grad_fusion_options={"pad_aten_mm_pass": {"k_threshold_to_pad": 8388608}}
)
def test_pad_mm_bf16(self):
m = 2
n = 13
k = 15691904
mat1 = torch.ones((m, k), device=GPU_TYPE, dtype=torch.bfloat16)
mat2 = torch.ones((k, n), device=GPU_TYPE, dtype=torch.bfloat16)
expected_alignment = get_alignment_size(mat1)
assert expected_alignment == 8, "Alignment for bfloat16 should be 8"
assert should_pad_common(mat1, mat2), (
"This should pass the common padding criteria"
)
assert should_pad_mm_bf16(mat1.dtype, m, n, k), (
"This should pass the should_pad_mm_bf16 padding criteria"
)
@torch.compile()
def mm(mat1, mat2):
return torch.mm(mat1, mat2)
res2, (code,) = run_and_get_code(mm, mat1, mat2)
mm_expected_result = torch.mm(mat1, mat2)
# in call code, expect to see a single pad per input, and then we should see padded allocation for output
FileCheck().check("del async_compile").check_count(
".run(", 2, exactly=True
).check(f"empty_strided_{GPU_TYPE}((8, 16)").run(code)
assert torch.allclose(res2, mm_expected_result), "MM results are not identical"
@fresh_cache()
@inductor_config.patch(
{
"triton.unique_kernel_names": "original_aten",
"max_autotune_gemm_backends": "TRITON",
"shape_padding": True,
}
)
def test_original_aten_preserved_pad_mm(self):
def fn(x, y):
return x @ y
args = [
torch.randn(2**4, 2**8 - 1, device=GPU_TYPE, dtype=torch.float16),
torch.randn(2**8 - 1, 2**4, device=GPU_TYPE, dtype=torch.float16),
]
counters.clear()
with unittest.mock.patch(
"torch._inductor.fx_passes.pad_mm._skip_do_bench_times", True
):
opt_fn = torch.compile(fn, mode="max-autotune")
ret, code = run_and_get_code(opt_fn, *args)
self.assertEqual(counters["inductor"]["pattern_matcher_count"], 1)
code = [c for c in code if "decompose_k" not in c]
# The mm kernel should use a template (because we set max_autotune_gemm_backends = TRITON).
# Its name should contain `mm` because `mm` was the original aten op where the mm came from.
FileCheck().check("def triton_tem_fused_mm").run(code[0])
def test_no_autocast_in_pad_bmm_joint_graph_pass(self):
# Track bmm dtypes before and after joint graph passes
bmm_dtypes_pre = {}
bmm_dtypes_post = {}
def make_bmm_dtype_tracker(dtype_dict):
def track_bmm_dtype(graph):
for node in graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.bmm.default
):
# Store the output dtype
if hasattr(node.meta.get("val", None), "dtype"):
dtype_dict[str(node)] = node.meta["val"].dtype
return graph
return track_bmm_dtype
class MaskedMHA(torch.nn.Module):
def __init__(self, H_q, H_kv, D):
super().__init__()
self.H_kv = H_kv
num_heads_total = H_q + 2 * H_kv
self.qkv_proj_vid = torch.nn.Linear(H_q * D, num_heads_total * D)
self.qkv_proj_txt = torch.nn.Linear(H_q * D, num_heads_total * D)
self.out_proj = torch.nn.Linear(H_q * D, H_q * D)
self.H_q = H_q
self.D = D
def forward(self, x_vid, x_txt, attn_mask):
qkv_vid = self.qkv_proj_vid(x_vid)
qkv_txt = self.qkv_proj_txt(x_txt)
qkv_vid = qkv_vid.reshape((*qkv_vid.shape[:-1], -1, self.D))
qkv_txt = qkv_txt.reshape((*qkv_txt.shape[:-1], -1, self.D))
q_vid = qkv_vid[..., : self.H_q, :]
k_vid = qkv_vid[..., self.H_q : self.H_q + self.H_kv, :]
v_vid = qkv_vid[..., self.H_q + self.H_kv :, :]
q_txt = qkv_txt[..., : self.H_q, :]
k_txt = qkv_txt[..., self.H_q : self.H_q + self.H_kv, :]
v_txt = qkv_txt[..., self.H_q + self.H_kv :, :]
q = torch.cat([q_vid, q_txt], dim=-3)
k = torch.cat([k_vid, k_txt], dim=-3)
v = torch.cat([v_vid, v_txt], dim=-3)
out = torch.nn.functional.scaled_dot_product_attention(
q.transpose(-2, -3),
k.transpose(-2, -3),
v.transpose(-2, -3),
attn_mask=attn_mask,
enable_gqa=True,
)
out = out.transpose(-2, -3)
return out
def test_masked_mha(B, H, S, D, device, dtype):
S_vid = 300
S_txt = S - S_vid
x1 = torch.randn(B, S_vid, H * D, requires_grad=True, device=device)
x2 = torch.randn(B, S_txt, H * D, requires_grad=True, device=device)
attn_mask = torch.ones(B, 1, S, S, dtype=torch.bool, device=device)
H_kv = H // 4
mha = MaskedMHA(H, H_kv, D)
mha = mha.to(device)
with torch._inductor.config.patch(
joint_custom_pre_pass=make_bmm_dtype_tracker(bmm_dtypes_pre),
joint_custom_post_pass=make_bmm_dtype_tracker(bmm_dtypes_post),
):
mha = torch.compile(mha, fullgraph=True, backend="inductor")
with torch.autocast(
device_type=GPU_TYPE, dtype=dtype, cache_enabled=False
):
out_vid = mha(x1, x2, attn_mask)
target_vid = torch.randn_like(out_vid)
loss_vid = (out_vid - target_vid).mean()
loss = loss_vid
loss.backward()
torch.accelerator.synchronize()
# Check if any bmm operations had dtype changes
for node_name_pre, node_name_post in zip(
bmm_dtypes_pre, bmm_dtypes_post, strict=True
):
pre_dtype = bmm_dtypes_pre[node_name_pre]
post_dtype = bmm_dtypes_post[node_name_post]
# Assert no bmm output dtype changes
self.assertEqual(pre_dtype, post_dtype)
# Based on issue https://github.com/pytorch/pytorch/issues/159469,
# if autocast was applied in pad_bmm causing bmm's output dtype to be changed from fp32 to bf16,
# gradient will have NaNs in this test case.
self.assertFalse(torch.any(x1.grad.isnan()).item())
self.assertFalse(torch.any(x2.grad.isnan()).item())
B, H, S, D = 2, 32, 549, 128
device = GPU_TYPE
dtype = torch.bfloat16
torch.compiler.reset()
torch.manual_seed(42)
test_masked_mha(B, H, S, D, device, dtype)
if __name__ == "__main__":
if HAS_GPU_AND_TRITON:
run_tests()
| PadMMTest |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_mutating_admission_policy_spec.py | {
"start": 383,
"end": 14859
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'failure_policy': 'str',
'match_conditions': 'list[V1alpha1MatchCondition]',
'match_constraints': 'V1alpha1MatchResources',
'mutations': 'list[V1alpha1Mutation]',
'param_kind': 'V1alpha1ParamKind',
'reinvocation_policy': 'str',
'variables': 'list[V1alpha1Variable]'
}
attribute_map = {
'failure_policy': 'failurePolicy',
'match_conditions': 'matchConditions',
'match_constraints': 'matchConstraints',
'mutations': 'mutations',
'param_kind': 'paramKind',
'reinvocation_policy': 'reinvocationPolicy',
'variables': 'variables'
}
def __init__(self, failure_policy=None, match_conditions=None, match_constraints=None, mutations=None, param_kind=None, reinvocation_policy=None, variables=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1MutatingAdmissionPolicySpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._failure_policy = None
self._match_conditions = None
self._match_constraints = None
self._mutations = None
self._param_kind = None
self._reinvocation_policy = None
self._variables = None
self.discriminator = None
if failure_policy is not None:
self.failure_policy = failure_policy
if match_conditions is not None:
self.match_conditions = match_conditions
if match_constraints is not None:
self.match_constraints = match_constraints
if mutations is not None:
self.mutations = mutations
if param_kind is not None:
self.param_kind = param_kind
if reinvocation_policy is not None:
self.reinvocation_policy = reinvocation_policy
if variables is not None:
self.variables = variables
@property
def failure_policy(self):
"""Gets the failure_policy of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings. A policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource. failurePolicy does not define how validations that evaluate to false are handled. Allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
:return: The failure_policy of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:rtype: str
"""
return self._failure_policy
@failure_policy.setter
def failure_policy(self, failure_policy):
"""Sets the failure_policy of this V1alpha1MutatingAdmissionPolicySpec.
failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings. A policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource. failurePolicy does not define how validations that evaluate to false are handled. Allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
:param failure_policy: The failure_policy of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:type: str
"""
self._failure_policy = failure_policy
@property
def match_conditions(self):
"""Gets the match_conditions of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. If a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the policy is skipped # noqa: E501
:return: The match_conditions of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:rtype: list[V1alpha1MatchCondition]
"""
return self._match_conditions
@match_conditions.setter
def match_conditions(self, match_conditions):
"""Sets the match_conditions of this V1alpha1MutatingAdmissionPolicySpec.
matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. If a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the policy is skipped # noqa: E501
:param match_conditions: The match_conditions of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:type: list[V1alpha1MatchCondition]
"""
self._match_conditions = match_conditions
@property
def match_constraints(self):
"""Gets the match_constraints of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:return: The match_constraints of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:rtype: V1alpha1MatchResources
"""
return self._match_constraints
@match_constraints.setter
def match_constraints(self, match_constraints):
"""Sets the match_constraints of this V1alpha1MutatingAdmissionPolicySpec.
:param match_constraints: The match_constraints of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:type: V1alpha1MatchResources
"""
self._match_constraints = match_constraints
@property
def mutations(self):
"""Gets the mutations of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis. # noqa: E501
:return: The mutations of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:rtype: list[V1alpha1Mutation]
"""
return self._mutations
@mutations.setter
def mutations(self, mutations):
"""Sets the mutations of this V1alpha1MutatingAdmissionPolicySpec.
mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis. # noqa: E501
:param mutations: The mutations of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:type: list[V1alpha1Mutation]
"""
self._mutations = mutations
@property
def param_kind(self):
"""Gets the param_kind of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:return: The param_kind of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:rtype: V1alpha1ParamKind
"""
return self._param_kind
@param_kind.setter
def param_kind(self, param_kind):
"""Sets the param_kind of this V1alpha1MutatingAdmissionPolicySpec.
:param param_kind: The param_kind of this V1alpha1MutatingAdmissionPolicySpec. # noqa: E501
:type: V1alpha1ParamKind
"""
self._param_kind = param_kind
    @property
    def reinvocation_policy(self):
        """Gets the reinvocation_policy of this V1alpha1MutatingAdmissionPolicySpec.  # noqa: E501

        reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\". Never: These mutations will not be called more than once per binding in a single admission evaluation. IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.  # noqa: E501

        :return: The reinvocation_policy of this V1alpha1MutatingAdmissionPolicySpec.  # noqa: E501
        :rtype: str
        """
        # Generated accessor: returns the private backing attribute as-is.
        return self._reinvocation_policy
    @reinvocation_policy.setter
    def reinvocation_policy(self, reinvocation_policy):
        """Sets the reinvocation_policy of this V1alpha1MutatingAdmissionPolicySpec.

        reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\". Never: These mutations will not be called more than once per binding in a single admission evaluation. IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.  # noqa: E501

        :param reinvocation_policy: The reinvocation_policy of this V1alpha1MutatingAdmissionPolicySpec.  # noqa: E501
        :type: str
        """
        # Generated setter: assigns directly; the API server enforces the
        # allowed values ("Never" / "IfNeeded"), not this client model.
        self._reinvocation_policy = reinvocation_policy
    @property
    def variables(self):
        """Gets the variables of this V1alpha1MutatingAdmissionPolicySpec.  # noqa: E501

        variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy. The expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.  # noqa: E501

        :return: The variables of this V1alpha1MutatingAdmissionPolicySpec.  # noqa: E501
        :rtype: list[V1alpha1Variable]
        """
        # Generated accessor: returns the private backing attribute as-is.
        return self._variables
    @variables.setter
    def variables(self, variables):
        """Sets the variables of this V1alpha1MutatingAdmissionPolicySpec.

        variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy. The expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.  # noqa: E501

        :param variables: The variables of this V1alpha1MutatingAdmissionPolicySpec.  # noqa: E501
        :type: list[V1alpha1Variable]
        """
        # Generated setter: assigns directly, no client-side validation.
        self._variables = variables
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # openapi_types maps attribute name -> declared type; iterate it so
        # only declared model properties (not arbitrary attributes) are emitted.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are model objects.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are model objects.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1MutatingAdmissionPolicySpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1MutatingAdmissionPolicySpec):
return True
return self.to_dict() != other.to_dict()
| V1alpha1MutatingAdmissionPolicySpec |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/database.py | {
"start": 1106,
"end": 1887
} | class ____(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
| _Cache |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/sentry/noop.py | {
"start": 1435,
"end": 1984
} | class ____:
"""Blank class for Sentry."""
def add_tagging(self, dag_run: DagRunProtocol, task_instance: RuntimeTaskInstanceProtocol) -> None:
"""Blank function for tagging."""
def add_breadcrumbs(self, task_instance: RuntimeTaskInstanceProtocol) -> None:
"""Blank function for breadcrumbs."""
def enrich_errors(self, run: Run) -> Run:
"""Blank function for formatting a TaskInstance._run_raw_task."""
return run
def flush(self) -> None:
"""Blank function for flushing errors."""
| NoopSentry |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 61278,
"end": 61533
} | class ____(Union):
    # ctypes union mirroring NVML's C `nvmlValue_t`: a single storage slot
    # read back as whichever scalar type a given field-value query reports.
    _fields_ = [
        ('dVal', c_double),       # double
        ('uiVal', c_uint),        # unsigned int
        ('ulVal', c_ulong),       # unsigned long
        ('ullVal', c_ulonglong),  # unsigned long long
        ('sllVal', c_longlong),   # signed long long
        ('siVal', c_int),         # signed int
        ('usVal', c_ushort),      # unsigned short
    ]
| c_nvmlValue_t |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 4330,
"end": 4410
} | class ____(graphene.ObjectType):
    # Auto-generated GraphQL object type with a single optional string field.
    model_id = graphene.String()
| MlflowModelInput |
python | streamlit__streamlit | lib/streamlit/dataframe_util.py | {
"start": 5231,
"end": 5405
} | class ____(Protocol):
    """Protocol for Pandas compatible objects that have a `to_pandas` method."""

    # Structural type: any object exposing to_pandas() matches, without
    # needing to inherit from this class.
    def to_pandas(self) -> DataFrame | Series[Any]: ...
| PandasCompatible |
python | google__jax | jax/_src/numpy/linalg.py | {
"start": 1840,
"end": 79349
} | class ____(NamedTuple):
  # Result triple returned by jnp.linalg.svd: A = U @ diag(S) @ Vh.
  U: Array   # left singular vectors
  S: Array   # singular values
  Vh: Array  # conjugate-transposed right singular vectors
def _H(x: ArrayLike) -> Array:
return ufuncs.conjugate(jnp.matrix_transpose(x))
def _symmetrize(x: Array) -> Array: return (x + _H(x)) / 2
@export
@api.jit(static_argnames=['upper', 'symmetrize_input'])
def cholesky(a: ArrayLike, *, upper: bool = False, symmetrize_input: bool = True) -> Array:
  """Compute the Cholesky decomposition of a matrix.

  JAX implementation of :func:`numpy.linalg.cholesky`: factor a (batched)
  Hermitian positive-definite matrix ``a`` as ``L @ L.conj().T`` (lower,
  the default) or ``U.conj().T @ U`` (with ``upper=True``).

  Args:
    a: array of shape ``(..., N, N)`` representing (batched) Hermitian
      positive-definite matrices.
    upper: if True, return the upper factor ``U``; if False (default),
      return the lower factor ``L``.
    symmetrize_input: if True (default), symmetrize the input first, which
      leads to better behavior under automatic differentiation; both
      triangles of the input are then used.

  Returns:
    Array of shape ``(..., N, N)`` with the requested triangular factor.
    Entries are NaN when the input is not Hermitian positive-definite.

  See also:
    - :func:`jax.scipy.linalg.cholesky`: SciPy-style Cholesky API
    - :func:`jax.lax.linalg.cholesky`: XLA-style Cholesky API
  """
  arr = ensure_arraylike("jnp.linalg.cholesky", a)
  arr, = promote_dtypes_inexact(arr)
  lower_factor = lax_linalg.cholesky(arr, symmetrize_input=symmetrize_input)
  if upper:
    # The upper factor is the conjugate transpose of the lower one.
    return lower_factor.mT.conj()
  return lower_factor
# Typed overloads for svd(): the static return type depends on compute_uv
# (SVDResult when True, a bare singular-value Array when False).
@overload
def svd(
    a: ArrayLike,
    full_matrices: bool = True,
    *,
    compute_uv: Literal[True],
    hermitian: bool = False,
    subset_by_index: tuple[int, int] | None = None,
) -> SVDResult:
  ...
@overload
def svd(
    a: ArrayLike,
    full_matrices: bool,
    compute_uv: Literal[True],
    hermitian: bool = False,
    subset_by_index: tuple[int, int] | None = None,
) -> SVDResult:
  ...
@overload
def svd(
    a: ArrayLike,
    full_matrices: bool = True,
    *,
    compute_uv: Literal[False],
    hermitian: bool = False,
    subset_by_index: tuple[int, int] | None = None,
) -> Array:
  ...
@overload
def svd(
    a: ArrayLike,
    full_matrices: bool,
    compute_uv: Literal[False],
    hermitian: bool = False,
    subset_by_index: tuple[int, int] | None = None,
) -> Array:
  ...
@overload
def svd(
    a: ArrayLike,
    full_matrices: bool = True,
    compute_uv: bool = True,
    hermitian: bool = False,
    subset_by_index: tuple[int, int] | None = None,
) -> Array | SVDResult:
  ...
@export
@partial(
    api.jit,
    static_argnames=(
        "full_matrices",
        "compute_uv",
        "hermitian",
        "subset_by_index",
    ),
)
def svd(
    a: ArrayLike,
    full_matrices: bool = True,
    compute_uv: bool = True,
    hermitian: bool = False,
    subset_by_index: tuple[int, int] | None = None,
) -> Array | SVDResult:
  r"""Compute the singular value decomposition :math:`A = U \Sigma V^H`.

  JAX implementation of :func:`numpy.linalg.svd`, implemented in terms of
  :func:`jax.lax.linalg.svd`.

  Args:
    a: input array of shape ``(..., N, M)``.
    full_matrices: if True (default), ``u`` and ``vh`` are square with shapes
      ``(..., N, N)`` and ``(..., M, M)``; otherwise ``(..., N, K)`` and
      ``(..., K, M)`` with ``K = min(N, M)``.
    compute_uv: if True (default), return the full SVD ``(u, s, vh)``;
      otherwise return only the singular values ``s``.
    hermitian: if True, assume the matrix is Hermitian, enabling a more
      efficient eigendecomposition-based implementation (default False).
    subset_by_index: (TPU-only) optional 2-tuple ``[start, end]`` of singular
      value indices to compute; only compatible with ``full_matrices=False``.

  Returns:
    ``SVDResult(u, s, vh)`` when ``compute_uv`` is True, otherwise the array
    ``s`` of shape ``(..., K)`` of singular values in descending order, where
    ``K = min(N, M)``.

  See also:
    - :func:`jax.scipy.linalg.svd`: SciPy-style SVD API
    - :func:`jax.lax.linalg.svd`: XLA-style SVD API
  """
  a = ensure_arraylike("jnp.linalg.svd", a)
  a, = promote_dtypes_inexact(a)
  if hermitian:
    # Hermitian route: singular values are the absolute eigenvalues, sorted
    # descending; singular vectors are the sign-adjusted eigenvectors.
    w, v = lax_linalg.eigh(a, subset_by_index=subset_by_index)
    s = lax.abs(v)
    if not compute_uv:
      return lax.rev(lax.sort(s, dimension=-1), dimensions=[s.ndim - 1])
    sign = lax.sign(v)
    idx_dtype = lax_utils.int_dtype_for_dim(
        s.shape[s.ndim - 1], signed=False)
    idxs = lax.broadcasted_iota(idx_dtype, s.shape, dimension=s.ndim - 1)
    # Sort by magnitude (carrying indices and signs along), then reverse to
    # obtain descending order.
    s, idxs, sign = lax.sort((s, idxs, sign), dimension=-1, num_keys=1)
    s = lax.rev(s, dimensions=[s.ndim - 1])
    idxs = lax.rev(idxs, dimensions=[s.ndim - 1])
    sign = lax.rev(sign, dimensions=[s.ndim - 1])
    u = indexing.take_along_axis(w, idxs[..., None, :], axis=-1)
    vh = _H(u * sign[..., None, :].astype(u.dtype))
    return SVDResult(u, s, vh)
  if not compute_uv:
    return lax_linalg.svd(
        a,
        full_matrices=full_matrices,
        compute_uv=False,
        subset_by_index=subset_by_index,
    )
  u, s, vh = lax_linalg.svd(
      a,
      full_matrices=full_matrices,
      compute_uv=True,
      subset_by_index=subset_by_index,
  )
  return SVDResult(u, s, vh)
@export
@api.jit(static_argnames=('n',))
def matrix_power(a: ArrayLike, n: int) -> Array:
  """Raise a square matrix to an integer power.

  JAX implementation of :func:`numpy.linalg.matrix_power`, computed by
  repeated squaring (O(log n) matrix multiplies). A negative exponent is
  applied to the inverse of ``a``; ``n == 0`` yields the identity.

  Args:
    a: array of shape ``(..., M, M)`` to be raised to the power ``n``.
    n: the integer exponent (may be zero or negative).

  Returns:
    Array of shape ``(..., M, M)`` containing the matrix power of ``a`` to
    the ``n``.

  Raises:
    TypeError: if ``a`` is not at least 2-dimensional and square, or if ``n``
      is not an integer.
  """
  arr = ensure_arraylike("jnp.linalg.matrix_power", a)

  if arr.ndim < 2:
    raise TypeError("{}-dimensional array given. Array must be at least "
                    "two-dimensional".format(arr.ndim))
  if arr.shape[-2] != arr.shape[-1]:
    raise TypeError("Last 2 dimensions of the array must be square")
  try:
    n = operator.index(n)
  except TypeError as err:
    raise TypeError(f"exponent must be an integer, got {n}") from err

  if n == 0:
    return jnp.broadcast_to(jnp.eye(arr.shape[-2], dtype=arr.dtype), arr.shape)
  if n < 0:
    # a^(-n) == (a^-1)^n
    arr = inv(arr)
    n = abs(n)

  # Fast paths for the most common small exponents.
  if n == 1:
    return arr
  if n == 2:
    return arr @ arr
  if n == 3:
    return (arr @ arr) @ arr

  # Binary (square-and-multiply) exponentiation over the bits of n.
  square = acc = None
  while n > 0:
    square = arr if square is None else (square @ square)
    n, bit = divmod(n, 2)
    if bit:
      acc = square if acc is None else (acc @ square)
  assert acc is not None
  return acc
@export
@api.jit
def matrix_rank(
    M: ArrayLike, rtol: ArrayLike | None = None, *, tol: ArrayLike | None = None) -> Array:
  """Compute the rank of a matrix from its singular values.

  JAX implementation of :func:`numpy.linalg.matrix_rank`: the rank is the
  number of singular values greater than ``rtol * largest_singular_value``.

  Args:
    M: array of shape ``(..., N, K)`` whose rank is to be computed.
    rtol: optional relative tolerance of shape broadcastable to ``(...)``.
      If None (default), a reasonable value is chosen from the floating
      point precision of the input dtype.
    tol: backward-compatibility alias of ``rtol``; only one of the two may
      be specified.

  Returns:
    Integer array of shape ``M.shape[:-2]`` giving the matrix rank.

  Notes:
    The result may be inaccurate for matrices with very small singular
    values or that are numerically ill-conditioned; consider adjusting
    ``rtol`` in such cases.
  """
  M = ensure_arraylike("jnp.linalg.matrix_rank", M)
  if tol is not None:
    if rtol is not None:
      raise ValueError("matrix_rank: only one of tol or rtol may be specified.")
    rtol = tol
  del tol
  M, = promote_dtypes_inexact(M)
  if M.ndim < 2:
    # A vector has rank 1 unless it is identically zero.
    return (M != 0).any().astype(np.int32)
  S = svd(M, full_matrices=False, compute_uv=False)
  if rtol is None:
    # Default tolerance: max(N, K) * eps, scaled by the largest singular value.
    rtol = S.max(-1) * np.max(M.shape[-2:]).astype(S.dtype) * jnp.finfo(S.dtype).eps
  rtol = jnp.expand_dims(rtol, np.ndim(rtol))
  return reductions.sum(S > rtol, axis=-1)
@custom_jvp
def _slogdet_lu(a: Array) -> tuple[Array, Array]:
  # slogdet via LU: |det(a)| = prod(|diag(U)|); the sign combines the
  # pivot-permutation parity with the diagonal's signs/phases.
  dtype = lax.dtype(a)
  lu, pivot, _ = lax_linalg.lu(a)
  diag = jnp.diagonal(lu, axis1=-2, axis2=-1)
  # A zero on the diagonal of U marks a singular matrix.
  is_zero = reductions.any(diag == jnp.array(0, dtype=dtype), axis=-1)
  iota = lax.expand_dims(jnp.arange(a.shape[-1], dtype=pivot.dtype),
                         range(pivot.ndim - 1))
  # Count pivot entries that differ from the identity permutation; its
  # parity determines the permutation's contribution to the sign below.
  parity = reductions.count_nonzero(pivot != iota, axis=-1)
  if jnp.iscomplexobj(a):
    # Complex case: accumulate the unit-modulus phases of the diagonal.
    sign = reductions.prod(diag / ufuncs.abs(diag).astype(diag.dtype), axis=-1)
  else:
    # Real case: fold negative diagonal entries into the parity count.
    sign = jnp.array(1, dtype=dtype)
    parity = parity + reductions.count_nonzero(diag < 0, axis=-1)
  sign = jnp.where(is_zero,
                   jnp.array(0, dtype=dtype),
                   sign * jnp.array(-2 * (parity % 2) + 1, dtype=dtype))
  logdet = jnp.where(
      is_zero, jnp.array(-np.inf, dtype=dtype),
      reductions.sum(ufuncs.log(ufuncs.abs(diag)).astype(dtype), axis=-1))
  return sign, ufuncs.real(logdet)
@custom_jvp
def _slogdet_qr(a: Array) -> tuple[Array, Array]:
  # Implementation of slogdet using QR decomposition. One reason we might prefer
  # QR decomposition is that it is more amenable to a fast batched
  # implementation on TPU because of the lack of row pivoting.
  if jnp.issubdtype(lax.dtype(a), np.complexfloating):
    raise NotImplementedError("slogdet method='qr' not implemented for complex "
                              "inputs")
  n = a.shape[-1]
  a, taus = lax_linalg.geqrf(a)
  # The determinant of a triangular matrix is the product of its diagonal
  # elements. We are working in log space, so we compute the magnitude as
  # the trace of the log-absolute values, and we compute the sign separately.
  a_diag = jnp.diagonal(a, axis1=-2, axis2=-1)
  log_abs_det = reductions.sum(ufuncs.log(ufuncs.abs(a_diag)), axis=-1)
  sign_diag = reductions.prod(ufuncs.sign(a_diag), axis=-1)
  # The determinant of a Householder reflector is -1. So whenever we actually
  # made a reflection (tau != 0), multiply the result by -1.
  sign_taus = reductions.prod(jnp.where(taus[..., :(n-1)] != 0, -1, 1), axis=-1).astype(sign_diag.dtype)
  return sign_diag * sign_taus, log_abs_det
@export
@api.jit(static_argnames=('method',))
def slogdet(a: ArrayLike, *, method: str | None = None) -> SlogdetResult:
  """Compute the sign and (natural) log of a determinant's absolute value.

  JAX implementation of :func:`numpy.linalg.slogdet`.

  Args:
    a: array of shape ``(..., M, M)``.
    method: decomposition backend, either ``'lu'`` (the default) or ``'qr'``.

  Returns:
    ``SlogdetResult(sign, logabsdet)``, each of shape ``a.shape[:-2]``, such
    that the determinant equals ``sign * exp(logabsdet)``.

  See also:
    :func:`jax.numpy.linalg.det`: direct computation of determinant
  """
  a = ensure_arraylike("jnp.linalg.slogdet", a)
  a, = promote_dtypes_inexact(a)
  a_shape = np.shape(a)
  if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:
    raise ValueError(f"Argument to slogdet() must have shape [..., n, n], got {a_shape}")
  if method is None or method == "lu":
    return SlogdetResult(*_slogdet_lu(a))
  if method == "qr":
    return SlogdetResult(*_slogdet_qr(a))
  raise ValueError(f"Unknown slogdet method '{method}'. Supported methods "
                   "are 'lu' (`None`), and 'qr'.")
def _slogdet_jvp(primals, tangents):
  # Shared JVP rule for both slogdet backends:
  # d(log|det A|) = real(tr(A^{-1} dA)); for complex inputs the imaginary
  # part of the trace drives the sign's derivative instead.
  x, = primals
  g, = tangents
  sign, ans = slogdet(x)
  ans_dot = jnp.trace(solve(x, g), axis1=-1, axis2=-2)
  if jnp.issubdtype(jnp._dtype(x), np.complexfloating):
    sign_dot = (ans_dot - ufuncs.real(ans_dot).astype(ans_dot.dtype)) * sign
    ans_dot = ufuncs.real(ans_dot)
  else:
    sign_dot = array_creation.zeros_like(sign)
  return (sign, ans), (sign_dot, ans_dot)

# Both decomposition backends share the same mathematical derivative.
_slogdet_lu.defjvp(_slogdet_jvp)
_slogdet_qr.defjvp(_slogdet_jvp)
def _cofactor_solve(a: ArrayLike, b: ArrayLike) -> tuple[Array, Array]:
  """Equivalent to det(a)*solve(a, b) for nonsingular mat.

  Intermediate function used for jvp and vjp of det.
  This function borrows heavily from jax.numpy.linalg.solve and
  jax.numpy.linalg.slogdet to compute the gradient of the determinant
  in a way that is well defined even for low rank matrices.

  This function handles two different cases:
  * rank(a) == n or n-1
  * rank(a) < n-1

  For rank n-1 matrices, the gradient of the determinant is a rank 1 matrix.
  Rather than computing det(a)*solve(a, b), which would return NaN, we work
  directly with the LU decomposition. If a = p @ l @ u, then
  det(a)*solve(a, b) =
  prod(diag(u)) * u^-1 @ l^-1 @ p^-1 b =
  prod(diag(u)) * triangular_solve(u, solve(p @ l, b))
  If a is rank n-1, then the lower right corner of u will be zero and the
  triangular_solve will fail.
  Let x = solve(p @ l, b) and y = det(a)*solve(a, b).
  Then y_{n}
  x_{n} / u_{nn} * prod_{i=1...n}(u_{ii}) =
  x_{n} * prod_{i=1...n-1}(u_{ii})
  So by replacing the lower-right corner of u with prod_{i=1...n-1}(u_{ii})^-1
  we can avoid the triangular_solve failing.
  To correctly compute the rest of y_{i} for i != n, we simply multiply
  x_{i} by det(a) for all i != n, which will be zero if rank(a) = n-1.

  For the second case, a check is done on the matrix to see if `solve`
  returns NaN or Inf, and gives a matrix of zeros as a result, as the
  gradient of the determinant of a matrix with rank less than n-1 is 0.
  This will still return the correct value for rank n-1 matrices, as the check
  is applied *after* the lower right corner of u has been updated.

  Args:
    a: A square matrix or batch of matrices, possibly singular.
    b: A matrix, or batch of matrices of the same dimension as a.

  Returns:
    det(a) and cofactor(a)^T*b, aka adjugate(a)*b
  """
  a, b = ensure_arraylike("jnp.linalg._cofactor_solve", a, b)
  a, = promote_dtypes_inexact(a)
  b, = promote_dtypes_inexact(b)
  a_shape = np.shape(a)
  b_shape = np.shape(b)
  a_ndims = len(a_shape)
  if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2]
          and b_shape[-2:] == a_shape[-2:]):
    msg = ("The arguments to _cofactor_solve must have shapes "
           "a=[..., m, m] and b=[..., m, m]; got a={} and b={}")
    raise ValueError(msg.format(a_shape, b_shape))
  if a_shape[-1] == 1:
    # 1x1 case: det is the single entry and the adjugate is the identity.
    return a[..., 0, 0], b
  # lu contains u in the upper triangular matrix and l in the strict lower
  # triangular matrix.
  # The diagonal of l is set to ones without loss of generality.
  lu, pivots, permutation = lax_linalg.lu(a)
  dtype = lax.dtype(a)
  batch_dims = lax.broadcast_shapes(lu.shape[:-2], b.shape[:-2])
  x = jnp.broadcast_to(b, batch_dims + b.shape[-2:])
  lu = jnp.broadcast_to(lu, batch_dims + lu.shape[-2:])
  # Compute (partial) determinant, ignoring last diagonal of LU
  diag = jnp.diagonal(lu, axis1=-2, axis2=-1)
  iota = lax.expand_dims(jnp.arange(a_shape[-1], dtype=pivots.dtype),
                         range(pivots.ndim - 1))
  parity = reductions.count_nonzero(pivots != iota, axis=-1)
  sign = jnp.asarray(-2 * (parity % 2) + 1, dtype=dtype)
  # partial_det[:, -1] contains the full determinant and
  # partial_det[:, -2] contains det(u) / u_{nn}.
  partial_det = reductions.cumprod(diag, axis=-1) * sign[..., None]
  # Replace u_{nn} with 1 / (det(u) / u_{nn}) per the derivation above, so
  # the triangular solve stays finite for rank n-1 inputs.
  lu = lu.at[..., -1, -1].set(1.0 / partial_det[..., -2])
  permutation = jnp.broadcast_to(permutation, (*batch_dims, a_shape[-1]))
  iotas = jnp.ix_(*(lax.iota(np.int32, b) for b in (*batch_dims, 1)))
  # filter out any matrices that are not full rank
  d = array_creation.ones(x.shape[:-1], x.dtype)
  d = lax_linalg.triangular_solve(lu, d, left_side=True, lower=False)
  d = reductions.any(ufuncs.logical_or(ufuncs.isnan(d), ufuncs.isinf(d)), axis=-1)
  d = jnp.tile(d[..., None, None], d.ndim*(1,) + x.shape[-2:])
  x = jnp.where(d, array_creation.zeros_like(x), x)  # first filter
  x = x[iotas[:-1] + (permutation, slice(None))]
  x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True,
                                  unit_diagonal=True)
  x = jnp.concatenate((x[..., :-1, :] * partial_det[..., -1, None, None],
                       x[..., -1:, :]), axis=-2)
  x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False)
  x = jnp.where(d, array_creation.zeros_like(x), x)  # second filter
  return partial_det[..., -1], x
def _det_2x2(a: Array) -> Array:
return (a[..., 0, 0] * a[..., 1, 1] -
a[..., 0, 1] * a[..., 1, 0])
def _det_3x3(a: Array) -> Array:
return (a[..., 0, 0] * a[..., 1, 1] * a[..., 2, 2] +
a[..., 0, 1] * a[..., 1, 2] * a[..., 2, 0] +
a[..., 0, 2] * a[..., 1, 0] * a[..., 2, 1] -
a[..., 0, 2] * a[..., 1, 1] * a[..., 2, 0] -
a[..., 0, 0] * a[..., 1, 2] * a[..., 2, 1] -
a[..., 0, 1] * a[..., 1, 0] * a[..., 2, 2])
@custom_jvp
def _det(a):
  # General-case determinant reconstructed from slogdet; the custom JVP
  # below avoids differentiating through the sign/log decomposition.
  sign, logdet = slogdet(a)
  return sign * ufuncs.exp(logdet).astype(sign.dtype)

@_det.defjvp
def _det_jvp(primals, tangents):
  # d(det A) = tr(adjugate(A) @ dA); _cofactor_solve returns det(A) and
  # adjugate(A) @ g in a form that stays finite for rank n-1 matrices.
  x, = primals
  g, = tangents
  y, z = _cofactor_solve(x, g)
  return y, jnp.trace(z, axis1=-1, axis2=-2)
@export
@api.jit
def det(a: ArrayLike) -> Array:
  """Compute the determinant of a (batched) square matrix.

  JAX implementation of :func:`numpy.linalg.det`.

  Args:
    a: array of shape ``(..., M, M)`` for which to compute the determinant.

  Returns:
    An array of determinants of shape ``a.shape[:-2]``.

  See also:
    :func:`jax.scipy.linalg.det`: Scipy-style API for determinant.
  """
  a = ensure_arraylike("jnp.linalg.det", a)
  a, = promote_dtypes_inexact(a)
  a_shape = np.shape(a)
  if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:
    msg = "Argument to _det() must have shape [..., n, n], got {}"
    raise ValueError(msg.format(a_shape))
  dim = a_shape[-1]
  if dim == 2:
    return _det_2x2(a)  # closed form, skips the LU factorization
  if dim == 3:
    return _det_3x3(a)  # closed form, skips the LU factorization
  return _det(a)        # general case via slogdet
@export
def eig(a: ArrayLike) -> EigResult:
  """Compute the eigenvalues and right eigenvectors of a square array.

  JAX implementation of :func:`numpy.linalg.eig`.

  Args:
    a: array of shape ``(..., M, M)``.

  Returns:
    ``EigResult(eigenvalues, eigenvectors)``: ``eigenvalues`` has shape
    ``(..., M)`` and column ``eigenvectors[..., :, i]`` corresponds to
    ``eigenvalues[..., i]``.

  Notes:
    - Unlike :func:`numpy.linalg.eig`, the result is always complex
      (complex64 for 32-bit input, complex128 for 64-bit input).
    - Non-symmetric eigendecomposition is currently implemented only on the
      CPU and GPU backends; see :func:`jax.lax.linalg.eig` for details of
      the GPU implementation.

  See also:
    - :func:`jax.numpy.linalg.eigh`: eigenvectors and eigenvalues of a Hermitian matrix.
    - :func:`jax.numpy.linalg.eigvals`: compute eigenvalues only.
  """
  a = ensure_arraylike("jnp.linalg.eig", a)
  a, = promote_dtypes_inexact(a)
  eigenvalues, eigenvectors = lax_linalg.eig(a, compute_left_eigenvectors=False)
  return EigResult(eigenvalues, eigenvectors)
@export
@api.jit
def eigvals(a: ArrayLike) -> Array:
  """Compute the eigenvalues of a general square matrix.

  JAX implementation of :func:`numpy.linalg.eigvals`.

  Args:
    a: array of shape ``(..., M, M)``.

  Returns:
    Array of shape ``(..., M)`` containing the eigenvalues. Unlike
    :func:`numpy.linalg.eigvals`, the result is always complex (complex64
    for 32-bit input, complex128 for 64-bit input).

  Notes:
    Non-symmetric eigendecomposition is currently implemented only on the
    CPU backend.

  See also:
    - :func:`jax.numpy.linalg.eig`: eigenvalues and eigenvectors of a general matrix.
    - :func:`jax.numpy.linalg.eigh`: eigenvalues and eigenvectors of a Hermitian matrix.
  """
  a = ensure_arraylike("jnp.linalg.eigvals", a)
  a, = promote_dtypes_inexact(a)
  # Request neither set of eigenvectors; the first result is the eigenvalues.
  results = lax_linalg.eig(a, compute_left_eigenvectors=False,
                           compute_right_eigenvectors=False)
  return results[0]
@export
@api.jit(static_argnames=('UPLO', 'symmetrize_input'))
def eigh(a: ArrayLike, UPLO: str | None = None,
         symmetrize_input: bool = True) -> EighResult:
  """Compute the eigendecomposition of a Hermitian (symmetric) matrix.

  JAX implementation of :func:`numpy.linalg.eigh`.

  Args:
    a: array of shape ``(..., M, M)`` containing Hermitian (if complex) or
      symmetric (if real) matrices.
    UPLO: ``'L'`` (default when None) to use the lower triangular part of
      ``a``, ``'U'`` to use the upper triangular part.
    symmetrize_input: if True (default), symmetrize the input first, which
      gives better behavior under automatic differentiation; both triangles
      of the input are then used.

  Returns:
    ``EighResult(eigenvalues, eigenvectors)``: ``eigenvalues`` has shape
    ``(..., M)`` in ascending order, and column ``eigenvectors[..., :, i]``
    is the normalized eigenvector for ``eigenvalues[..., i]``.

  Raises:
    ValueError: if ``UPLO`` is not one of None, ``'L'``, or ``'U'``.

  See also:
    - :func:`jax.numpy.linalg.eig`: general eigenvalue decomposition.
    - :func:`jax.numpy.linalg.eigvalsh`: compute eigenvalues only.
    - :func:`jax.scipy.linalg.eigh`: SciPy API for Hermitian eigendecomposition.
    - :func:`jax.lax.linalg.eigh`: XLA API for Hermitian eigendecomposition.
  """
  a = ensure_arraylike("jnp.linalg.eigh", a)
  if UPLO not in (None, "L", "U"):
    raise ValueError(f"UPLO must be one of None, 'L', or 'U', got {UPLO}")
  lower = UPLO != "U"
  a, = promote_dtypes_inexact(a)
  # lax_linalg.eigh returns (eigenvectors, eigenvalues).
  v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)
  return EighResult(w, v)
@export
@api.jit(static_argnames=('UPLO', 'symmetrize_input'))
def eigvalsh(a: ArrayLike, UPLO: str | None = 'L', *,
             symmetrize_input: bool = True) -> Array:
  """Compute the eigenvalues of a Hermitian (symmetric) matrix.

  JAX implementation of :func:`numpy.linalg.eigvalsh`; a thin wrapper around
  :func:`eigh` that discards the eigenvectors.

  Args:
    a: array of shape ``(..., M, M)`` containing Hermitian (if complex) or
      symmetric (if real) matrices.
    UPLO: ``'L'`` (default) to use the lower triangular part of ``a``,
      ``'U'`` to use the upper triangular part.
    symmetrize_input: if True (default), symmetrize the input first, which
      gives better behavior under automatic differentiation; both triangles
      of the input are then used.

  Returns:
    Array of shape ``(..., M)`` containing the eigenvalues in ascending
    order.

  See also:
    - :func:`jax.numpy.linalg.eig`: general eigenvalue decomposition.
    - :func:`jax.numpy.linalg.eigh`: eigenvalues and eigenvectors of a
      Hermitian matrix.
  """
  a = ensure_arraylike("jnp.linalg.eigvalsh", a)
  a, = promote_dtypes_inexact(a)
  eigenvalues, _ = eigh(a, UPLO, symmetrize_input=symmetrize_input)
  return eigenvalues
# TODO(micky774): deprecated 2024-5-14, remove wrapper after deprecation expires.
@export
def pinv(a: ArrayLike, rtol: ArrayLike | None = None,
         hermitian: bool = False, *, rcond: ArrayLike | None = None) -> Array:
  """Compute the (Moore-Penrose) pseudo-inverse of a matrix.
  JAX implementation of :func:`numpy.linalg.pinv`.
  Args:
    a: array of shape ``(..., M, N)`` containing matrices to pseudo-invert.
    rtol: float or array_like of shape ``a.shape[:-2]``. Cutoff for small
      singular values; singular values smaller than
      ``rtol * largest_singular_value`` are treated as zero. The default is
      determined based on the floating point precision of the dtype.
    hermitian: if True, then the input is assumed to be Hermitian, and a more
      efficient algorithm is used (default: False)
    rcond: alias of the ``rtol`` argument, present for backward compatibility.
      Only one of ``rtol`` and ``rcond`` may be specified.
  Returns:
    An array of shape ``(..., N, M)`` containing the pseudo-inverse of ``a``.
  See also:
    - :func:`jax.numpy.linalg.inv`: multiplicative inverse of a square matrix.
  Notes:
    :func:`jax.numpy.linalg.pinv` differs from :func:`numpy.linalg.pinv` in the
    default value of ``rcond``: in NumPy, the default is ``1e-15``. In JAX, the
    default is ``10. * max(num_rows, num_cols) * jnp.finfo(dtype).eps``.
  Examples:
    >>> a = jnp.array([[1, 2],
    ... [3, 4],
    ... [5, 6]])
    >>> a_pinv = jnp.linalg.pinv(a)
    >>> a_pinv # doctest: +SKIP
    Array([[-1.333332 , -0.33333257, 0.6666657 ],
    [ 1.0833322 , 0.33333272, -0.41666582]], dtype=float32)
    The pseudo-inverse operates as a multiplicative inverse so long as the
    output is not rank-deficient:
    >>> jnp.allclose(a_pinv @ a, jnp.eye(2), atol=1E-4)
    Array(True, dtype=bool)
  """
  # This wrapper only reconciles the `rtol`/`rcond` aliases; the actual
  # computation (and its custom JVP rule) lives in _pinv below.
  if rcond is not None:
    if rtol is not None:
      raise ValueError("pinv: only one of rtol and rcond may be specified.")
    rtol = rcond
  del rcond
  return _pinv(a, rtol, hermitian)
@partial(custom_jvp, nondiff_argnums=(1, 2))
@api.jit(static_argnames=('hermitian',))
def _pinv(a: ArrayLike, rtol: ArrayLike | None = None, hermitian: bool = False) -> Array:
  """SVD-based pseudo-inverse implementation backing :func:`pinv`."""
  # Uses same algorithm as
  # https://github.com/numpy/numpy/blob/v1.17.0/numpy/linalg/linalg.py#L1890-L1979
  a = ensure_arraylike("jnp.linalg.pinv", a)
  arr, = promote_dtypes_inexact(a)
  m, n = arr.shape[-2:]
  if m == 0 or n == 0:
    # Degenerate case: the pseudo-inverse of an empty matrix is its
    # (empty) transpose.
    return array_creation.empty(arr.shape[:-2] + (n, m), arr.dtype)
  arr = ufuncs.conj(arr)
  if rtol is None:
    # Default cutoff documented in pinv: 10 * max(M, N) * eps.
    max_rows_cols = max(arr.shape[-2:])
    rtol = 10. * max_rows_cols * jnp.array(jnp.finfo(arr.dtype).eps)
  rtol = jnp.asarray(rtol)
  u, s, vh = svd(arr, full_matrices=False, hermitian=hermitian)
  # Singular values less than or equal to ``rtol * largest_singular_value``
  # are set to zero.
  # Broadcast rtol against the batch dimensions of s (excluding the
  # trailing singular-value axis).
  rtol = lax.expand_dims(rtol[..., np.newaxis], range(s.ndim - rtol.ndim - 1))
  cutoff = rtol * s[..., 0:1]
  # Masked singular values are replaced by +inf so that the division below
  # yields exactly zero for them (1/inf == 0).
  s = jnp.where(s > cutoff, s, np.inf).astype(u.dtype)
  res = tensor_contractions.matmul(vh.mT, ufuncs.divide(u.mT, s[..., np.newaxis]),
                                   precision=lax.Precision.HIGHEST)
  return lax.convert_element_type(res, arr.dtype)
@_pinv.defjvp
@config.default_matmul_precision("float32")
def _pinv_jvp(rtol, hermitian, primals, tangents):
  """Custom JVP rule for :func:`_pinv` (Moore-Penrose pseudo-inverse)."""
  # The Differentiation of Pseudo-Inverses and Nonlinear Least Squares Problems
  # Whose Variables Separate. Author(s): G. H. Golub and V. Pereyra. SIAM
  # Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), pp. 413-432.
  # (via https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Derivative)
  a, = primals  # m x n
  a_dot, = tangents
  p = pinv(a, rtol=rtol, hermitian=hermitian)  # n x m
  if hermitian:
    # svd(..., hermitian=True) symmetrizes its input, and the JVP must match.
    a = _symmetrize(a)
    a_dot = _symmetrize(a_dot)
  # TODO(phawkins): this could be simplified in the Hermitian case if we
  # supported triangular matrix multiplication.
  m, n = a.shape[-2:]
  # NOTE(review): the two branches appear to be the same formula with the
  # products parenthesized to keep intermediates small — confirm.
  if m >= n:
    s = (p @ _H(p)) @ _H(a_dot)  # nxm
    t = (_H(a_dot) @ _H(p)) @ p  # nxm
    p_dot = -(p @ a_dot) @ p + s - (s @ a) @ p + t - (p @ a) @ t
  else:  # m < n
    s = p @ (_H(p) @ _H(a_dot))
    t = _H(a_dot) @ (_H(p) @ p)
    p_dot = -p @ (a_dot @ p) + s - s @ (a @ p) + t - p @ (a @ t)
  return p, p_dot
@export
@api.jit
def inv(a: ArrayLike) -> Array:
  """Return the inverse of a square matrix
  JAX implementation of :func:`numpy.linalg.inv`.
  Args:
    a: array of shape ``(..., N, N)`` specifying square array(s) to be inverted.
  Returns:
    Array of shape ``(..., N, N)`` containing the inverse of the input.
  Notes:
    In most cases, explicitly computing the inverse of a matrix is ill-advised. For
    example, to compute ``x = inv(A) @ b``, it is more performant and numerically
    precise to use a direct solve, such as :func:`jax.scipy.linalg.solve`.
  See Also:
    - :func:`jax.scipy.linalg.inv`: SciPy-style API for matrix inverse
    - :func:`jax.numpy.linalg.solve`: direct linear solver
  Examples:
    Compute the inverse of a 3x3 matrix
    >>> a = jnp.array([[1., 2., 3.],
    ... [2., 4., 2.],
    ... [3., 2., 1.]])
    >>> a_inv = jnp.linalg.inv(a)
    >>> a_inv # doctest: +SKIP
    Array([[ 0. , -0.25 , 0.5 ],
    [-0.25 , 0.5 , -0.25000003],
    [ 0.5 , -0.25 , 0. ]], dtype=float32)
    Check that multiplying with the inverse gives the identity:
    >>> jnp.allclose(a @ a_inv, jnp.eye(3), atol=1E-5)
    Array(True, dtype=bool)
    Multiply the inverse by a vector ``b``, to find a solution to ``a @ x = b``
    >>> b = jnp.array([1., 4., 2.])
    >>> a_inv @ b
    Array([ 0. , 1.25, -0.5 ], dtype=float32)
    Note, however, that explicitly computing the inverse in such a case can lead
    to poor performance and loss of precision as the size of the problem grows.
    Instead, you should use a direct solver like :func:`jax.numpy.linalg.solve`:
    >>> jnp.linalg.solve(a, b)
    Array([ 0. , 1.25, -0.5 ], dtype=float32)
  """
  arr = ensure_arraylike("jnp.linalg.inv", a)
  shape = arr.shape
  if len(shape) < 2 or shape[-1] != shape[-2]:
    raise ValueError(
        f"Argument to inv must have shape [..., n, n], got {arr.shape}.")
  # Computing inv(a) as solve(a, I) reuses solve's batching behavior.
  identity = lax.broadcast(jnp.eye(shape[-1], dtype=arr.dtype), shape[:-2])
  return solve(arr, identity)
@export
@api.jit(static_argnames=('ord', 'axis', 'keepdims'))
def norm(x: ArrayLike, ord: int | str | None = None,
         axis: None | tuple[int, ...] | int = None,
         keepdims: bool = False) -> Array:
  """Compute the norm of a matrix or vector.
  JAX implementation of :func:`numpy.linalg.norm`.
  Args:
    x: N-dimensional array for which the norm will be computed.
    ord: specify the kind of norm to take. Default is Frobenius norm for matrices,
      and the 2-norm for vectors. For other options, see Notes below.
    axis: integer or sequence of integers specifying the axes over which the norm
      will be computed. For a single axis, compute a vector norm. For two axes,
      compute a matrix norm. Defaults to all axes of ``x``.
    keepdims: if True, the output array will have the same number of dimensions as
      the input, with the size of reduced axes replaced by ``1`` (default: False).
  Returns:
    array containing the specified norm of x.
  Notes:
    The flavor of norm computed depends on the value of ``ord`` and the number of
    axes being reduced.
    For **vector norms** (i.e. a single axis reduction):
    - ``ord=None`` (default) computes the 2-norm
    - ``ord=inf`` computes ``max(abs(x))``
    - ``ord=-inf`` computes ``min(abs(x))``
    - ``ord=0`` computes ``sum(x!=0)``
    - for other numerical values, computes ``sum(abs(x) ** ord)**(1/ord)``
    For **matrix norms** (i.e. two axes reductions):
    - ``ord='fro'`` or ``ord=None`` (default) computes the Frobenius norm
    - ``ord='nuc'`` computes the nuclear norm, or the sum of the singular values
    - ``ord=1`` computes ``max(abs(x).sum(0))``
    - ``ord=-1`` computes ``min(abs(x).sum(0))``
    - ``ord=2`` computes the 2-norm, i.e. the largest singular value
    - ``ord=-2`` computes the smallest singular value
    In the special case of ``ord=None`` and ``axis=None``, this function accepts an
    array of any dimension and computes the vector 2-norm of the flattened array.
  Examples:
    Vector norms:
    >>> x = jnp.array([3., 4., 12.])
    >>> jnp.linalg.norm(x)
    Array(13., dtype=float32)
    >>> jnp.linalg.norm(x, ord=1)
    Array(19., dtype=float32)
    >>> jnp.linalg.norm(x, ord=0)
    Array(3., dtype=float32)
    Matrix norms:
    >>> x = jnp.array([[1., 2., 3.],
    ... [4., 5., 7.]])
    >>> jnp.linalg.norm(x) # Frobenius norm
    Array(10.198039, dtype=float32)
    >>> jnp.linalg.norm(x, ord='nuc') # nuclear norm
    Array(10.762535, dtype=float32)
    >>> jnp.linalg.norm(x, ord=1) # 1-norm
    Array(10., dtype=float32)
    Batched vector norm:
    >>> jnp.linalg.norm(x, axis=1)
    Array([3.7416575, 9.486833 ], dtype=float32)
  """
  x = ensure_arraylike("jnp.linalg.norm", x)
  x, = promote_dtypes_inexact(x)
  x_shape = np.shape(x)
  ndim = len(x_shape)
  if axis is None:
    # NumPy has an undocumented behavior that admits arbitrary rank inputs if
    # `ord` is None: https://github.com/numpy/numpy/issues/14215
    if ord is None:
      return ufuncs.sqrt(reductions.sum(ufuncs.real(x * ufuncs.conj(x)), keepdims=keepdims))
    axis = tuple(range(ndim))
  elif isinstance(axis, tuple):
    axis = tuple(canonicalize_axis(x, ndim) for x in axis)
  else:
    axis = (canonicalize_axis(axis, ndim),)
  num_axes = len(axis)
  if num_axes == 1:
    # One reduced axis: dispatch to the vector norm.
    return vector_norm(x, ord=2 if ord is None else ord, axis=axis, keepdims=keepdims)
  elif num_axes == 2:
    row_axis, col_axis = axis # pytype: disable=bad-unpacking
    if ord is None or ord in ('f', 'fro'):
      return ufuncs.sqrt(reductions.sum(ufuncs.real(x * ufuncs.conj(x)), axis=axis,
                                        keepdims=keepdims))
    elif ord == 1:
      # The inner sum removes row_axis, so col_axis must be shifted down
      # when the reduced dimension is not kept.
      if not keepdims and col_axis > row_axis:
        col_axis -= 1
      return reductions.amax(reductions.sum(ufuncs.abs(x), axis=row_axis, keepdims=keepdims),
                             axis=col_axis, keepdims=keepdims, initial=0)
    elif ord == -1:
      if not keepdims and col_axis > row_axis:
        col_axis -= 1
      return reductions.amin(reductions.sum(ufuncs.abs(x), axis=row_axis, keepdims=keepdims),
                             axis=col_axis, keepdims=keepdims)
    elif ord == np.inf:
      # Same axis-shift logic as above, with the roles of the axes swapped.
      if not keepdims and row_axis > col_axis:
        row_axis -= 1
      return reductions.amax(reductions.sum(ufuncs.abs(x), axis=col_axis, keepdims=keepdims),
                             axis=row_axis, keepdims=keepdims, initial=0)
    elif ord == -np.inf:
      if not keepdims and row_axis > col_axis:
        row_axis -= 1
      return reductions.amin(reductions.sum(ufuncs.abs(x), axis=col_axis, keepdims=keepdims),
                             axis=row_axis, keepdims=keepdims)
    elif ord in ('nuc', 2, -2):
      # These norms are defined through the singular values of x.
      x = jnp.moveaxis(x, axis, (-2, -1))
      s = svd(x, compute_uv=False)
      if ord == 2:
        y = reductions.amax(s, axis=-1, initial=0)
      elif ord == -2:
        y = reductions.amin(s, axis=-1)
      else:
        y = reductions.sum(s, axis=-1)
      if keepdims:
        y = jnp.expand_dims(y, axis)
      return y
    else:
      raise ValueError(f"Invalid order '{ord}' for matrix norm.")
  else:
    raise ValueError(f"Improper number of axes for norm: {axis=}. Pass one axis to"
                     " compute a vector-norm, or two axes to compute a matrix-norm.")
# Typing overloads: ``qr`` returns a bare array of R factors when
# ``mode="r"``, and a ``QRResult`` (Q, R) namedtuple for the other modes.
@overload
def qr(a: ArrayLike,
       mode: Literal["reduced", "complete", "raw", "full"] = "reduced",
       ) -> QRResult: ...
@overload
def qr(a: ArrayLike, mode: Literal["r"]) -> Array: ...
@overload
def qr(a: ArrayLike, mode: str) -> Array | QRResult: ...
@export
@api.jit(static_argnames=('mode',))
def qr(a: ArrayLike, mode: str = "reduced") -> Array | QRResult:
  """Compute the QR decomposition of an array
  JAX implementation of :func:`numpy.linalg.qr`.
  The QR decomposition of a matrix `A` is given by
  .. math::
     A = QR
  Where `Q` is a unitary matrix (i.e. :math:`Q^HQ=I`) and `R` is an upper-triangular
  matrix.
  Args:
    a: array of shape (..., M, N)
    mode: Computational mode. Supported values are:
      - ``"reduced"`` (default): return `Q` of shape ``(..., M, K)`` and `R` of shape
        ``(..., K, N)``, where ``K = min(M, N)``.
      - ``"complete"``: return `Q` of shape ``(..., M, M)`` and `R` of shape ``(..., M, N)``.
      - ``"raw"``: return lapack-internal representations of shape ``(..., M, N)`` and ``(..., K)``.
      - ``"r"``: return `R` only.
  Returns:
    A tuple ``(Q, R)`` (if ``mode`` is not ``"r"``) otherwise an array ``R``,
    where:
    - ``Q`` is an orthogonal matrix of shape ``(..., M, K)`` (if ``mode`` is ``"reduced"``)
      or ``(..., M, M)`` (if ``mode`` is ``"complete"``).
    - ``R`` is an upper-triangular matrix of shape ``(..., M, N)`` (if ``mode`` is
      ``"r"`` or ``"complete"``) or ``(..., K, N)`` (if ``mode`` is ``"reduced"``)
      with ``K = min(M, N)``.
  See also:
    - :func:`jax.scipy.linalg.qr`: SciPy-style QR decomposition API
    - :func:`jax.lax.linalg.qr`: XLA-style QR decomposition API
  Examples:
    Compute the QR decomposition of a matrix:
    >>> a = jnp.array([[1., 2., 3., 4.],
    ... [5., 4., 2., 1.],
    ... [6., 3., 1., 5.]])
    >>> Q, R = jnp.linalg.qr(a)
    >>> Q # doctest: +SKIP
    Array([[-0.12700021, -0.7581426 , -0.6396022 ],
    [-0.63500065, -0.43322435, 0.63960224],
    [-0.7620008 , 0.48737738, -0.42640156]], dtype=float32)
    >>> R # doctest: +SKIP
    Array([[-7.8740077, -5.080005 , -2.4130025, -4.953006 ],
    [ 0. , -1.7870499, -2.6534991, -1.028908 ],
    [ 0. , 0. , -1.0660033, -4.050814 ]], dtype=float32)
    Check that ``Q`` is orthonormal:
    >>> jnp.allclose(Q.T @ Q, jnp.eye(3), atol=1E-5)
    Array(True, dtype=bool)
    Reconstruct the input:
    >>> jnp.allclose(Q @ R, a)
    Array(True, dtype=bool)
  """
  a = ensure_arraylike("jnp.linalg.qr", a)
  a, = promote_dtypes_inexact(a)
  if mode == "raw":
    # LAPACK-internal form: packed elementary reflectors plus tau factors.
    a, taus = lax_linalg.geqrf(a)
    return QRResult(a.mT, taus)
  if mode not in ("reduced", "r", "full", "complete"):
    raise ValueError(f"Unsupported QR decomposition mode '{mode}'")
  # Only "complete" asks for the full square Q.
  q, r = lax_linalg.qr(a, pivoting=False, full_matrices=(mode == "complete"))
  return r if mode == "r" else QRResult(q, r)
@export
@api.jit
def solve(a: ArrayLike, b: ArrayLike) -> Array:
  """Solve a linear system of equations.
  JAX implementation of :func:`numpy.linalg.solve`.
  This solves a (batched) linear system of equations ``a @ x = b``
  for ``x`` given ``a`` and ``b``.
  If ``a`` is singular, this will return ``nan`` or ``inf`` values.
  Args:
    a: array of shape ``(..., N, N)``.
    b: array of shape ``(N,)`` (for 1-dimensional right-hand-side) or
      ``(..., N, M)`` (for batched 2-dimensional right-hand-side).
  Returns:
    An array containing the result of the linear solve if ``a`` is non-singular.
    The result has shape ``(..., N)`` if ``b`` is of shape ``(N,)``, and has
    shape ``(..., N, M)`` otherwise.
    If ``a`` is singular, the result contains ``nan`` or ``inf`` values.
  See also:
    - :func:`jax.scipy.linalg.solve`: SciPy-style API for solving linear systems.
    - :func:`jax.lax.custom_linear_solve`: matrix-free linear solver.
  Examples:
    A simple 3x3 linear system:
    >>> A = jnp.array([[1., 2., 3.],
    ... [2., 4., 2.],
    ... [3., 2., 1.]])
    >>> b = jnp.array([14., 16., 10.])
    >>> x = jnp.linalg.solve(A, b)
    >>> x
    Array([1., 2., 3.], dtype=float32)
    Confirming that the result solves the system:
    >>> jnp.allclose(A @ x, b)
    Array(True, dtype=bool)
  """
  a, b = ensure_arraylike("jnp.linalg.solve", a, b)
  a, b = promote_dtypes_inexact(a, b)
  if a.ndim < 2:
    raise ValueError(
        f"left hand array must be at least two dimensional; got {a.shape=}")
  # Reject shapes that, prior to JAX v0.5.0, were silently treated as a
  # batched 1-dimensional solve.
  looks_like_batched_1d = (
      b.ndim > 1 and a.ndim == b.ndim + 1 and
      a.shape[-1] == b.shape[-1] and a.shape[-1] != b.shape[-2])
  if looks_like_batched_1d:
    raise ValueError(
        f"Invalid shapes for solve: {a.shape}, {b.shape}. Prior to JAX v0.5.0,"
        " this would have been treated as a batched 1-dimensional solve."
        " To recover this behavior, use solve(a, b[..., None]).squeeze(-1).")
  if b.ndim == 1:
    signature = "(m,m),(m)->(m)"
  else:
    signature = "(m,m),(m,n)->(m,n)"
  a, b = core.standard_insert_pvary(a, b)
  return jnp.vectorize(lax_linalg._solve, signature=signature)(a, b)
def _lstsq(a: ArrayLike, b: ArrayLike, rcond: float | None, *,
           numpy_resid: bool = False) -> tuple[Array, Array, Array, Array]:
  """SVD-based least-squares solver backing :func:`lstsq`.

  Returns ``(x, resid, rank, s)`` as described in :func:`lstsq`.
  """
  # TODO: add lstsq to lax_linalg and implement this function via those wrappers.
  # TODO: add custom jvp rule for more robust lstsq differentiation
  a, b = promote_dtypes_inexact(a, b)
  if a.shape[0] != b.shape[0]:
    raise ValueError("Leading dimensions of input arrays must match")
  b_orig_ndim = b.ndim
  # Promote a 1D right-hand side to a single column; undone before returning.
  if b_orig_ndim == 1:
    b = b[:, None]
  if a.ndim != 2:
    raise TypeError(
      f"{a.ndim}-dimensional array given. Array must be two-dimensional")
  if b.ndim != 2:
    raise TypeError(
      f"{b.ndim}-dimensional array given. Array must be one or two-dimensional")
  m, n = a.shape
  dtype = a.dtype
  if a.size == 0:
    # Degenerate empty problem: zero rank, no singular values, empty solution.
    s = array_creation.empty(0, dtype=a.dtype)
    rank = jnp.array(0, dtype=int)
    x = array_creation.empty((n, *b.shape[1:]), dtype=a.dtype)
  else:
    if rcond is None:
      rcond = float(jnp.finfo(dtype).eps) * max(n, m)
    else:
      # A negative rcond selects machine epsilon (NumPy's rcond=-1 convention).
      rcond = jnp.where(rcond < 0, jnp.finfo(dtype).eps, rcond)
    u, s, vt = svd(a, full_matrices=False)
    # Keep only singular values above the relative cutoff; their count is
    # the numerical rank.
    mask = (s > 0) & (s >= jnp.array(rcond, dtype=s.dtype) * s[0])
    rank = mask.sum()
    # safe_s avoids division by zero; masked entries get an inverse of 0.
    safe_s = jnp.where(mask, s, 1).astype(a.dtype)
    s_inv = jnp.where(mask, 1 / safe_s, 0)[:, np.newaxis]
    uTb = tensor_contractions.matmul(u.conj().T, b, precision=lax.Precision.HIGHEST)
    x = tensor_contractions.matmul(vt.conj().T, s_inv * uTb, precision=lax.Precision.HIGHEST)
  # Numpy returns empty residuals in some cases. To allow compilation, we
  # default to returning full residuals in all cases.
  if numpy_resid and (rank < n or m <= n):
    resid = jnp.asarray([])
  else:
    b_estimate = tensor_contractions.matmul(a, x, precision=lax.Precision.HIGHEST)
    resid = norm(b - b_estimate, axis=0) ** 2
  if b_orig_ndim == 1:
    x = x.ravel()
  return x, resid, rank, s

# jit-compiled fast path used by lstsq when numpy_resid=False.
_jit_lstsq = api.jit(partial(_lstsq, numpy_resid=False))
@export
def lstsq(a: ArrayLike, b: ArrayLike, rcond: float | None = None, *,
          numpy_resid: bool = False) -> tuple[Array, Array, Array, Array]:
  """
  Return the least-squares solution to a linear equation.
  JAX implementation of :func:`numpy.linalg.lstsq`.
  Args:
    a: coefficient matrix of shape ``(M, N)``.
    b: right-hand side of shape ``(M,)`` or ``(M, K)``.
    rcond: Cut-off ratio for small singular values. Singular values smaller than
      ``rcond * largest_singular_value`` are treated as zero. If None (default),
      the optimal value will be used to reduce floating point errors.
    numpy_resid: If True, compute and return residuals in the same way as NumPy's
      `linalg.lstsq`. This is necessary if you want to precisely replicate NumPy's
      behavior. If False (default), a more efficient method is used to compute residuals.
  Returns:
    Tuple of arrays ``(x, resid, rank, s)`` where
    - ``x`` is a shape ``(N,)`` or ``(N, K)`` array containing the least-squares solution.
    - ``resid`` is the sum of squared residual of shape ``()`` or ``(K,)``.
    - ``rank`` is the rank of the matrix ``a``.
    - ``s`` is the singular values of the matrix ``a``.
  Examples:
    >>> a = jnp.array([[1, 2],
    ... [3, 4]])
    >>> b = jnp.array([5, 6])
    >>> x, _, _, _ = jnp.linalg.lstsq(a, b)
    >>> with jnp.printoptions(precision=3):
    ... print(x)
    [-4. 4.5]
  """
  a, b = ensure_arraylike("jnp.linalg.lstsq", a, b)
  # The NumPy-compatible residual computation can yield data-dependent output
  # shapes, so that path bypasses jit.
  solver = partial(_lstsq, numpy_resid=True) if numpy_resid else _jit_lstsq
  return solver(a, b, rcond)
@export
def cross(x1: ArrayLike, x2: ArrayLike, /, *, axis=-1):
  r"""Compute the cross-product of two 3D vectors
  JAX implementation of :func:`numpy.linalg.cross`
  Args:
    x1: N-dimensional array, with ``x1.shape[axis] == 3``
    x2: N-dimensional array, with ``x2.shape[axis] == 3``, and other axes
      broadcast-compatible with ``x1``.
    axis: axis along which to take the cross product (default: -1).
  Returns:
    array containing the result of the cross-product
  See Also:
    :func:`jax.numpy.cross`: more flexible cross-product API.
  Examples:
    Showing that :math:`\hat{x} \times \hat{y} = \hat{z}`:
    >>> x = jnp.array([1., 0., 0.])
    >>> y = jnp.array([0., 1., 0.])
    >>> jnp.linalg.cross(x, y)
    Array([0., 0., 1.], dtype=float32)
    Cross product of :math:`\hat{x}` with all three standard unit vectors,
    via broadcasting:
    >>> xyz = jnp.eye(3)
    >>> jnp.linalg.cross(x, xyz, axis=-1)
    Array([[ 0., 0., 0.],
    [ 0., 0., 1.],
    [ 0., -1., 0.]], dtype=float32)
  """
  # Fix: the function-name label passed to ensure_arraylike previously read
  # "jnp.linalg.outer", producing misleading error messages for bad inputs.
  x1, x2 = ensure_arraylike("jnp.linalg.cross", x1, x2)
  if x1.shape[axis] != 3 or x2.shape[axis] != 3:
    raise ValueError(
        "Both input arrays must be (arrays of) 3-dimensional vectors, "
        f"but they have {x1.shape[axis]=} and {x2.shape[axis]=}"
    )
  # Delegate the actual computation to the general jnp.cross implementation.
  return jnp.cross(x1, x2, axis=axis)
@export
def outer(x1: ArrayLike, x2: ArrayLike, /) -> Array:
  """Compute the outer product of two 1-dimensional arrays.
  JAX implementation of :func:`numpy.linalg.outer`.
  Args:
    x1: array
    x2: array
  Returns:
    array containing the outer product of ``x1`` and ``x2``
  See also:
    :func:`jax.numpy.outer`: similar function in the main :mod:`jax.numpy` module.
  Examples:
    >>> x1 = jnp.array([1, 2, 3])
    >>> x2 = jnp.array([4, 5, 6])
    >>> jnp.linalg.outer(x1, x2)
    Array([[ 4, 5, 6],
    [ 8, 10, 12],
    [12, 15, 18]], dtype=int32)
  """
  x1, x2 = ensure_arraylike("jnp.linalg.outer", x1, x2)
  if not (x1.ndim == 1 and x2.ndim == 1):
    raise ValueError(f"Input arrays must be one-dimensional, but they are {x1.ndim=} {x2.ndim=}")
  # Broadcast a column against a row: out[i, j] = x1[i] * x2[j].
  return x1[:, None] * x2[None, :]
@export
def matrix_norm(x: ArrayLike, /, *, keepdims: bool = False, ord: str | int = 'fro') -> Array:
  """Compute the norm of a matrix or stack of matrices.
  JAX implementation of :func:`numpy.linalg.matrix_norm`
  Args:
    x: array of shape ``(..., M, N)`` for which to take the norm.
    keepdims: if True, keep the reduced dimensions in the output.
    ord: A string or int specifying the type of norm; default is the Frobenius norm.
      See :func:`numpy.linalg.norm` for details on available options.
  Returns:
    array containing the norm of ``x``. Has shape ``x.shape[:-2]`` if ``keepdims`` is
    False, or shape ``(..., 1, 1)`` if ``keepdims`` is True.
  See also:
    - :func:`jax.numpy.linalg.vector_norm`: Norm of a vector or stack of vectors.
    - :func:`jax.numpy.linalg.norm`: More general matrix or vector norm.
  Examples:
    >>> x = jnp.array([[1, 2, 3],
    ... [4, 5, 6],
    ... [7, 8, 9]])
    >>> jnp.linalg.matrix_norm(x)
    Array(16.881943, dtype=float32)
  """
  x = ensure_arraylike('jnp.linalg.matrix_norm', x)
  # A matrix norm is the general norm restricted to the trailing two axes.
  return norm(x, ord=ord, keepdims=keepdims, axis=(-2, -1))
@export
def matrix_transpose(x: ArrayLike, /) -> Array:
  """Transpose a matrix or stack of matrices.
  JAX implementation of :func:`numpy.linalg.matrix_transpose`.
  Args:
    x: array of shape ``(..., M, N)``
  Returns:
    array of shape ``(..., N, M)`` with the last two axes of ``x`` swapped.
  See also:
    :func:`jax.numpy.transpose`: more general transpose operation.
  Examples:
    Transpose of a single matrix:
    >>> x = jnp.array([[1, 2, 3],
    ... [4, 5, 6]])
    >>> jnp.linalg.matrix_transpose(x)
    Array([[1, 4],
    [2, 5],
    [3, 6]], dtype=int32)
    Transpose of a stack of matrices:
    >>> x = jnp.array([[[1, 2],
    ... [3, 4]],
    ... [[5, 6],
    ... [7, 8]]])
    >>> jnp.linalg.matrix_transpose(x)
    Array([[[1, 3],
    [2, 4]],
    <BLANKLINE>
    [[5, 7],
    [6, 8]]], dtype=int32)
    For convenience, the same computation can be done via the
    :attr:`~jax.Array.mT` property of JAX array objects:
    >>> x.mT
    Array([[[1, 3],
    [2, 4]],
    <BLANKLINE>
    [[5, 7],
    [6, 8]]], dtype=int32)
  """
  x_arr = ensure_arraylike('jnp.linalg.matrix_transpose', x)
  ndim = x_arr.ndim
  if ndim < 2:
    raise ValueError(f"matrix_transpose requires at least 2 dimensions; got {ndim=}")
  # Identity permutation with the trailing two axes exchanged.
  perm = list(range(ndim))
  perm[-2], perm[-1] = perm[-1], perm[-2]
  return lax.transpose(x_arr, tuple(perm))
@export
def vector_norm(x: ArrayLike, /, *, axis: int | tuple[int, ...] | None = None, keepdims: bool = False,
                ord: int | str = 2) -> Array:
  """Compute the vector norm of a vector or batch of vectors.
  JAX implementation of :func:`numpy.linalg.vector_norm`.
  Args:
    x: N-dimensional array for which to take the norm.
    axis: optional axis along which to compute the vector norm. If None (default)
      then ``x`` is flattened and the norm is taken over all values.
    keepdims: if True, keep the reduced dimensions in the output.
    ord: A string or int specifying the type of norm; default is the 2-norm.
      See :func:`numpy.linalg.norm` for details on available options.
  Returns:
    array containing the norm of ``x``.
  See also:
    - :func:`jax.numpy.linalg.matrix_norm`: Norm of a matrix or stack of matrices.
    - :func:`jax.numpy.linalg.norm`: More general matrix or vector norm.
  Examples:
    Norm of a single vector:
    >>> x = jnp.array([1., 2., 3.])
    >>> jnp.linalg.vector_norm(x)
    Array(3.7416575, dtype=float32)
    Norm of a batch of vectors:
    >>> x = jnp.array([[1., 2., 3.],
    ... [4., 5., 7.]])
    >>> jnp.linalg.vector_norm(x, axis=1)
    Array([3.7416575, 9.486833 ], dtype=float32)
  """
  x = ensure_arraylike('jnp.linalg.vector_norm', x)
  if ord is None or ord == 2:
    # Euclidean norm: real(x * conj(x)) == |x|**2 for real and complex inputs.
    return ufuncs.sqrt(reductions.sum(ufuncs.real(x * ufuncs.conj(x)), axis=axis,
                                      keepdims=keepdims))
  elif ord == np.inf:
    return reductions.amax(ufuncs.abs(x), axis=axis, keepdims=keepdims, initial=0)
  elif ord == -np.inf:
    return reductions.amin(ufuncs.abs(x), axis=axis, keepdims=keepdims)
  elif ord == 0:
    # Count of nonzero entries, returned in the corresponding float dtype.
    return reductions.sum(x != 0, dtype=jnp.finfo(lax.dtype(x)).dtype,
                          axis=axis, keepdims=keepdims)
  elif ord == 1:
    # Numpy has a special case for ord == 1 as an optimization. We don't
    # really need the optimization (XLA could do it for us), but the Numpy
    # code has slightly different type promotion semantics, so we need a
    # special case too.
    return reductions.sum(ufuncs.abs(x), axis=axis, keepdims=keepdims)
  elif isinstance(ord, str):
    msg = f"Invalid order '{ord}' for vector norm."
    # Fix: the appended hints previously lacked a leading space, producing
    # the run-together message "...vector norm.Use 'jax.numpy.inf' instead."
    if ord == "inf":
      msg += " Use 'jax.numpy.inf' instead."
    if ord == "-inf":
      msg += " Use '-jax.numpy.inf' instead."
    raise ValueError(msg)
  else:
    # General p-norm: (sum(|x| ** p)) ** (1/p).
    abs_x = ufuncs.abs(x)
    ord_arr = lax._const(abs_x, ord)
    ord_inv = lax._const(abs_x, 1. / ord_arr)
    out = reductions.sum(abs_x ** ord_arr, axis=axis, keepdims=keepdims)
    return ufuncs.power(out, ord_inv)
@export
def vecdot(x1: ArrayLike, x2: ArrayLike, /, *, axis: int = -1,
           precision: lax.PrecisionLike = None,
           preferred_element_type: DTypeLike | None = None) -> Array:
  """Compute the (batched) vector conjugate dot product of two arrays.
  JAX implementation of :func:`numpy.linalg.vecdot`.
  Args:
    x1: left-hand side array.
    x2: right-hand side array. Size of ``x2[axis]`` must match size of ``x1[axis]``,
      and remaining dimensions must be broadcast-compatible.
    axis: axis along which to compute the dot product (default: -1)
    precision: either ``None`` (default), which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      such values indicating precision of ``x1`` and ``x2``.
    preferred_element_type: either ``None`` (default), which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.
  Returns:
    array containing the conjugate dot product of ``x1`` and ``x2`` along ``axis``.
    The non-contracted dimensions are broadcast together.
  See also:
    - :func:`jax.numpy.vecdot`: similar API in the ``jax.numpy`` namespace.
    - :func:`jax.numpy.linalg.matmul`: matrix multiplication.
    - :func:`jax.numpy.linalg.tensordot`: general tensor dot product.
  Examples:
    Vector dot product of two 1D arrays:
    >>> x1 = jnp.array([1, 2, 3])
    >>> x2 = jnp.array([4, 5, 6])
    >>> jnp.linalg.vecdot(x1, x2)
    Array(32, dtype=int32)
    Batched vector dot product of two 2D arrays:
    >>> x1 = jnp.array([[1, 2, 3],
    ... [4, 5, 6]])
    >>> x2 = jnp.array([[2, 3, 4]])
    >>> jnp.linalg.vecdot(x1, x2, axis=-1)
    Array([20, 47], dtype=int32)
  """
  x1, x2 = ensure_arraylike('jnp.linalg.vecdot', x1, x2)
  # Thin array-API wrapper; the real work happens in tensor_contractions.
  return tensor_contractions.vecdot(
      x1, x2, axis=axis, precision=precision,
      preferred_element_type=preferred_element_type)
@export
def matmul(x1: ArrayLike, x2: ArrayLike, /, *,
           precision: lax.PrecisionLike = None,
           preferred_element_type: DTypeLike | None = None) -> Array:
  """Perform a matrix multiplication.
  JAX implementation of :func:`numpy.linalg.matmul`.
  Args:
    x1: first input array, of shape ``(..., N)``.
    x2: second input array. Must have shape ``(N,)`` or ``(..., N, M)``.
      In the multi-dimensional case, leading dimensions must be broadcast-compatible
      with the leading dimensions of ``x1``.
    precision: either ``None`` (default), which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      such values indicating precision of ``x1`` and ``x2``.
    preferred_element_type: either ``None`` (default), which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.
  Returns:
    array containing the matrix product of the inputs. Shape is ``x1.shape[:-1]``
    if ``x2.ndim == 1``, otherwise the shape is ``(..., M)``.
  See Also:
    :func:`jax.numpy.matmul`: NumPy API for this function.
    :func:`jax.numpy.linalg.vecdot`: batched vector product.
    :func:`jax.numpy.linalg.tensordot`: batched tensor product.
  Examples:
    Vector dot products:
    >>> x1 = jnp.array([1, 2, 3])
    >>> x2 = jnp.array([4, 5, 6])
    >>> jnp.linalg.matmul(x1, x2)
    Array(32, dtype=int32)
    Matrix dot product:
    >>> x1 = jnp.array([[1, 2, 3],
    ... [4, 5, 6]])
    >>> x2 = jnp.array([[1, 2],
    ... [3, 4],
    ... [5, 6]])
    >>> jnp.linalg.matmul(x1, x2)
    Array([[22, 28],
    [49, 64]], dtype=int32)
    For convenience, in all cases you can do the same computation using
    the ``@`` operator:
    >>> x1 @ x2
    Array([[22, 28],
    [49, 64]], dtype=int32)
  """
  x1, x2 = ensure_arraylike('jnp.linalg.matmul', x1, x2)
  # Thin array-API wrapper; the real work happens in tensor_contractions.
  return tensor_contractions.matmul(
      x1, x2, precision=precision,
      preferred_element_type=preferred_element_type)
@export
def tensordot(x1: ArrayLike, x2: ArrayLike, /, *,
              axes: int | tuple[Sequence[int], Sequence[int]] = 2,
              precision: lax.PrecisionLike = None,
              preferred_element_type: DTypeLike | None = None,
              out_sharding: NamedSharding | P | None = None) -> Array:
  """Compute the tensor dot product of two N-dimensional arrays.
  JAX implementation of :func:`numpy.linalg.tensordot`.
  Args:
    x1: N-dimensional array
    x2: M-dimensional array
    axes: integer or tuple of sequences of integers. If an integer `k`, then
      sum over the last `k` axes of ``x1`` and the first `k` axes of ``x2``,
      in order. If a tuple, then ``axes[0]`` specifies the axes of ``x1`` and
      ``axes[1]`` specifies the axes of ``x2``.
    precision: either ``None`` (default), which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      such values indicating precision of ``x1`` and ``x2``.
    preferred_element_type: either ``None`` (default), which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.
  Returns:
    array containing the tensor dot product of the inputs
  See also:
    - :func:`jax.numpy.tensordot`: equivalent API in the :mod:`jax.numpy` namespace.
    - :func:`jax.numpy.einsum`: NumPy API for more general tensor contractions.
    - :func:`jax.lax.dot_general`: XLA API for more general tensor contractions.
  Examples:
    >>> x1 = jnp.arange(24.).reshape(2, 3, 4)
    >>> x2 = jnp.ones((3, 4, 5))
    >>> jnp.linalg.tensordot(x1, x2)
    Array([[ 66., 66., 66., 66., 66.],
    [210., 210., 210., 210., 210.]], dtype=float32)
    Equivalent result when specifying the axes as explicit sequences:
    >>> jnp.linalg.tensordot(x1, x2, axes=([1, 2], [0, 1]))
    Array([[ 66., 66., 66., 66., 66.],
    [210., 210., 210., 210., 210.]], dtype=float32)
    Equivalent result via :func:`~jax.numpy.einsum`:
    >>> jnp.einsum('ijk,jkm->im', x1, x2)
    Array([[ 66., 66., 66., 66., 66.],
    [210., 210., 210., 210., 210.]], dtype=float32)
    Setting ``axes=1`` for two-dimensional inputs is equivalent to a matrix
    multiplication:
    >>> x1 = jnp.array([[1, 2],
    ... [3, 4]])
    >>> x2 = jnp.array([[1, 2, 3],
    ... [4, 5, 6]])
    >>> jnp.linalg.tensordot(x1, x2, axes=1)
    Array([[ 9, 12, 15],
    [19, 26, 33]], dtype=int32)
    >>> x1 @ x2
    Array([[ 9, 12, 15],
    [19, 26, 33]], dtype=int32)
    Setting ``axes=0`` for one-dimensional inputs is equivalent to
    :func:`jax.numpy.linalg.outer`:
    >>> x1 = jnp.array([1, 2])
    >>> x2 = jnp.array([1, 2, 3])
    >>> jnp.linalg.tensordot(x1, x2, axes=0)
    Array([[1, 2, 3],
    [2, 4, 6]], dtype=int32)
    >>> jnp.linalg.outer(x1, x2)
    Array([[1, 2, 3],
    [2, 4, 6]], dtype=int32)
  """
  x1, x2 = ensure_arraylike('jnp.linalg.tensordot', x1, x2)
  # Thin array-API wrapper; the real work happens in tensor_contractions.
  return tensor_contractions.tensordot(
      x1, x2, axes=axes, precision=precision,
      preferred_element_type=preferred_element_type, out_sharding=out_sharding)
@export
def svdvals(x: ArrayLike, /) -> Array:
"""Compute the singular values of a matrix.
JAX implementation of :func:`numpy.linalg.svdvals`.
Args:
x: array of shape ``(..., M, N)`` for which singular values will be computed.
Returns:
array of singular values of shape ``(..., K)`` with ``K = min(M, N)``.
See also:
:func:`jax.numpy.linalg.svd`: compute singular values and singular vectors
Examples:
>>> x = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> jnp.linalg.svdvals(x)
Array([9.508031 , 0.7728694], dtype=float32)
"""
x = ensure_arraylike('jnp.linalg.svdvals', x)
return svd(x, compute_uv=False, hermitian=False)
@export
def diagonal(x: ArrayLike, /, *, offset: int = 0) -> Array:
"""Extract the diagonal of an matrix or stack of matrices.
JAX implementation of :func:`numpy.linalg.diagonal`.
Args:
x: array of shape ``(..., M, N)`` from which the diagonal will be extracted.
offset: positive or negative offset from the main diagonal.
Returns:
Array of shape ``(..., K)`` where ``K`` is the length of the specified diagonal.
See Also:
- :func:`jax.numpy.diagonal`: more general functionality for extracting diagonals.
- :func:`jax.numpy.diag`: create a diagonal matrix from values.
Examples:
Diagonals of a single matrix:
>>> x = jnp.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]])
>>> jnp.linalg.diagonal(x)
Array([ 1, 6, 11], dtype=int32)
>>> jnp.linalg.diagonal(x, offset=1)
Array([ 2, 7, 12], dtype=int32)
>>> jnp.linalg.diagonal(x, offset=-1)
Array([ 5, 10], dtype=int32)
Batched diagonals:
>>> x = jnp.arange(24).reshape(2, 3, 4)
>>> jnp.linalg.diagonal(x)
Array([[ 0, 5, 10],
[12, 17, 22]], dtype=int32)
"""
x = ensure_arraylike('jnp.linalg.diagonal', x)
return jnp.diagonal(x, offset=offset, axis1=-2, axis2=-1)
@export
def tensorinv(a: ArrayLike, ind: int = 2) -> Array:
"""Compute the tensor inverse of an array.
JAX implementation of :func:`numpy.linalg.tensorinv`.
This computes the inverse of the :func:`~jax.numpy.linalg.tensordot`
operation with the same ``ind`` value.
Args:
a: array to be inverted. Must have ``prod(a.shape[:ind]) == prod(a.shape[ind:])``
ind: positive integer specifying the number of indices in the tensor product.
Returns:
array of shape ``(*a.shape[ind:], *a.shape[:ind])`` containing the
tensor inverse of ``a``.
See also:
- :func:`jax.numpy.linalg.tensordot`
- :func:`jax.numpy.linalg.tensorsolve`
Examples:
>>> key = jax.random.key(1337)
>>> x = jax.random.normal(key, shape=(2, 2, 4))
>>> xinv = jnp.linalg.tensorinv(x, 2)
>>> xinv_x = jnp.linalg.tensordot(xinv, x, axes=2)
>>> jnp.allclose(xinv_x, jnp.eye(4), atol=1E-4)
Array(True, dtype=bool)
"""
arr = ensure_arraylike("tensorinv", a)
ind = operator.index(ind)
if ind <= 0:
raise ValueError(f"ind must be a positive integer; got {ind=}")
contracting_shape, batch_shape = arr.shape[:ind], arr.shape[ind:]
flatshape = (math.prod(contracting_shape), math.prod(batch_shape))
if flatshape[0] != flatshape[1]:
raise ValueError("tensorinv is only possible when the product of the first"
" `ind` dimensions equals that of the remaining dimensions."
f" got {arr.shape=} with {ind=}.")
return inv(arr.reshape(flatshape)).reshape(*batch_shape, *contracting_shape)
@export
def tensorsolve(a: ArrayLike, b: ArrayLike, axes: tuple[int, ...] | None = None) -> Array:
"""Solve the tensor equation a x = b for x.
JAX implementation of :func:`numpy.linalg.tensorsolve`.
Args:
a: input array. After reordering via ``axes`` (see below), shape must be
``(*b.shape, *x.shape)``.
b: right-hand-side array.
axes: optional tuple specifying axes of ``a`` that should be moved to the end
Returns:
array x such that after reordering of axes of ``a``, ``tensordot(a, x, x.ndim)``
is equivalent to ``b``.
See also:
- :func:`jax.numpy.linalg.tensordot`
- :func:`jax.numpy.linalg.tensorinv`
Examples:
>>> key1, key2 = jax.random.split(jax.random.key(8675309))
>>> a = jax.random.normal(key1, shape=(2, 2, 4))
>>> b = jax.random.normal(key2, shape=(2, 2))
>>> x = jnp.linalg.tensorsolve(a, b)
>>> x.shape
(4,)
Now show that ``x`` can be used to reconstruct ``b`` using
:func:`~jax.numpy.linalg.tensordot`:
>>> b_reconstructed = jnp.linalg.tensordot(a, x, axes=x.ndim)
>>> jnp.allclose(b, b_reconstructed)
Array(True, dtype=bool)
"""
a_arr, b_arr = ensure_arraylike("tensorsolve", a, b)
if axes is not None:
a_arr = jnp.moveaxis(a_arr, axes, len(axes) * (a_arr.ndim - 1,))
out_shape = a_arr.shape[b_arr.ndim:]
if a_arr.shape[:b_arr.ndim] != b_arr.shape:
raise ValueError("After moving axes to end, leading shape of a must match shape of b."
f" got a.shape={a_arr.shape}, b.shape={b_arr.shape}")
if b_arr.size != math.prod(out_shape):
raise ValueError("Input arrays must have prod(a.shape[:b.ndim]) == prod(a.shape[b.ndim:]);"
f" got a.shape={a_arr.shape}, b.ndim={b_arr.ndim}.")
a_arr = a_arr.reshape(b_arr.size, math.prod(out_shape))
return solve(a_arr, b_arr.ravel()).reshape(out_shape)
@export
def multi_dot(arrays: Sequence[ArrayLike], *, precision: lax.PrecisionLike = None) -> Array:
"""Efficiently compute matrix products between a sequence of arrays.
JAX implementation of :func:`numpy.linalg.multi_dot`.
JAX internally uses the opt_einsum library to compute the most efficient
operation order.
Args:
arrays: sequence of arrays. All must be two-dimensional, except the first
and last which may be one-dimensional.
precision: either ``None`` (default), which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
Returns:
an array representing the equivalent of ``reduce(jnp.matmul, arrays)``, but
evaluated in the optimal order.
This function exists because the cost of computing sequences of matmul operations
can differ vastly depending on the order in which the operations are evaluated.
For a single matmul, the number of floating point operations (flops) required to
compute a matrix product can be approximated this way:
>>> def approx_flops(x, y):
... # for 2D x and y, with x.shape[1] == y.shape[0]
... return 2 * x.shape[0] * x.shape[1] * y.shape[1]
Suppose we have three matrices that we'd like to multiply in sequence:
>>> key1, key2, key3 = jax.random.split(jax.random.key(0), 3)
>>> x = jax.random.normal(key1, shape=(200, 5))
>>> y = jax.random.normal(key2, shape=(5, 100))
>>> z = jax.random.normal(key3, shape=(100, 10))
Because of associativity of matrix products, there are two orders in which we might
evaluate the product ``x @ y @ z``, and both produce equivalent outputs up to floating
point precision:
>>> result1 = (x @ y) @ z
>>> result2 = x @ (y @ z)
>>> jnp.allclose(result1, result2, atol=1E-4)
Array(True, dtype=bool)
But the computational cost of these differ greatly:
>>> print("(x @ y) @ z flops:", approx_flops(x, y) + approx_flops(x @ y, z))
(x @ y) @ z flops: 600000
>>> print("x @ (y @ z) flops:", approx_flops(y, z) + approx_flops(x, y @ z))
x @ (y @ z) flops: 30000
The second approach is about 20x more efficient in terms of estimated flops!
``multi_dot`` is a function that will automatically choose the fastest
computational path for such problems:
>>> result3 = jnp.linalg.multi_dot([x, y, z])
>>> jnp.allclose(result1, result3, atol=1E-4)
Array(True, dtype=bool)
We can use JAX's :ref:`ahead-of-time-lowering` tools to estimate the total flops
of each approach, and confirm that ``multi_dot`` is choosing the more efficient
option:
>>> jax.jit(lambda x, y, z: (x @ y) @ z).lower(x, y, z).cost_analysis()['flops']
600000.0
>>> jax.jit(lambda x, y, z: x @ (y @ z)).lower(x, y, z).cost_analysis()['flops']
30000.0
>>> jax.jit(jnp.linalg.multi_dot).lower([x, y, z]).cost_analysis()['flops']
30000.0
"""
arrs = list(ensure_arraylike('jnp.linalg.multi_dot', *arrays))
if len(arrs) < 2:
raise ValueError(f"multi_dot requires at least two arrays; got len(arrays)={len(arrs)}")
if not (arrs[0].ndim in (1, 2) and arrs[-1].ndim in (1, 2) and
all(a.ndim == 2 for a in arrs[1:-1])):
raise ValueError("multi_dot: input arrays must all be two-dimensional, except for"
" the first and last array which may be 1 or 2 dimensional."
f" Got array shapes {[a.shape for a in arrs]}")
if any(a.shape[-1] != b.shape[0] for a, b in zip(arrs[:-1], arrs[1:])):
raise ValueError("multi_dot: last dimension of each array must match first dimension"
f" of following array. Got array shapes {[a.shape for a in arrs]}")
einsum_axes: list[tuple[int, ...]] = [(i, i+1) for i in range(len(arrs))]
if arrs[0].ndim == 1:
einsum_axes[0] = einsum_axes[0][1:]
if arrs[-1].ndim == 1:
einsum_axes[-1] = einsum_axes[-1][:1]
return einsum.einsum(*itertools.chain(*zip(arrs, einsum_axes)), # type: ignore[call-overload]
optimize='auto', precision=precision)
@export
@api.jit(static_argnames=['p'])
def cond(x: ArrayLike, p=None):
"""Compute the condition number of a matrix.
JAX implementation of :func:`numpy.linalg.cond`.
The condition number is defined as ``norm(x, p) * norm(inv(x), p)``. For ``p = 2``
(the default), the condition number is the ratio of the largest to the smallest
singular value.
Args:
x: array of shape ``(..., M, N)`` for which to compute the condition number.
p: the order of the norm to use. One of ``{None, 1, -1, 2, -2, inf, -inf, 'fro'}``;
see :func:`jax.numpy.linalg.norm` for the meaning of these. The default is ``p = None``,
which is equivalent to ``p = 2``. If not in ``{None, 2, -2}`` then ``x`` must be square,
i.e. ``M = N``.
Returns:
array of shape ``x.shape[:-2]`` containing the condition number.
See also:
:func:`jax.numpy.linalg.norm`
Examples:
Well-conditioned matrix:
>>> x = jnp.array([[1, 2],
... [2, 1]])
>>> jnp.linalg.cond(x)
Array(3., dtype=float32)
Ill-conditioned matrix:
>>> x = jnp.array([[1, 2],
... [0, 0]])
>>> jnp.linalg.cond(x)
Array(inf, dtype=float32)
"""
arr = ensure_arraylike("cond", x)
if arr.ndim < 2:
raise ValueError(f"jnp.linalg.cond: input array must be at least 2D; got {arr.shape=}")
if arr.shape[-1] == 0 or arr.shape[-2] == 0:
raise ValueError(f"jnp.linalg.cond: input array must not be empty; got {arr.shape=}")
if p is None or p == 2:
s = svdvals(x)
return s[..., 0] / s[..., -1]
elif p == -2:
s = svdvals(x)
r = s[..., -1] / s[..., 0]
else:
if arr.shape[-2] != arr.shape[-1]:
raise ValueError(f"jnp.linalg.cond: for {p=}, array must be square; got {arr.shape=}")
r = norm(x, ord=p, axis=(-2, -1)) * norm(inv(x), ord=p, axis=(-2, -1))
# Convert NaNs to infs where original array has no NaNs.
return jnp.where(ufuncs.isnan(r) & ~ufuncs.isnan(x).any(axis=(-2, -1)), np.inf, r)
@export
def trace(x: ArrayLike, /, *,
offset: int = 0, dtype: DTypeLike | None = None) -> Array:
"""Compute the trace of a matrix.
JAX implementation of :func:`numpy.linalg.trace`.
Args:
x: array of shape ``(..., M, N)`` and whose innermost two
dimensions form MxN matrices for which to take the trace.
offset: positive or negative offset from the main diagonal
(default: 0).
dtype: data type of the returned array (default: ``None``). If ``None``,
then output dtype will match the dtype of ``x``, promoted to default
precision in the case of integer types.
Returns:
array of batched traces with shape ``x.shape[:-2]``
See also:
- :func:`jax.numpy.trace`: similar API in the ``jax.numpy`` namespace.
Examples:
Trace of a single matrix:
>>> x = jnp.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]])
>>> jnp.linalg.trace(x)
Array(18, dtype=int32)
>>> jnp.linalg.trace(x, offset=1)
Array(21, dtype=int32)
>>> jnp.linalg.trace(x, offset=-1, dtype="float32")
Array(15., dtype=float32)
Batched traces:
>>> x = jnp.arange(24).reshape(2, 3, 4)
>>> jnp.linalg.trace(x)
Array([15, 51], dtype=int32)
"""
x = ensure_arraylike('jnp.linalg.trace', x)
return jnp.trace(x, offset=offset, axis1=-2, axis2=-1, dtype=dtype)
| SVDResult |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_serializers.py | {
"start": 227,
"end": 5350
} | class ____(UptimeTestCase):
def test(self) -> None:
detector = self.create_uptime_detector()
uptime_subscription = get_uptime_subscription(detector)
result = serialize(detector, serializer=UptimeDetectorSerializer())
assert result == {
"id": str(detector.id),
"projectSlug": self.project.slug,
"name": detector.name,
"environment": detector.config.get("environment"),
"status": "active",
"uptimeStatus": 1, # UptimeStatus.OK from detector state
"mode": detector.config.get("mode", 1),
"url": uptime_subscription.url,
"method": uptime_subscription.method,
"body": uptime_subscription.body,
"headers": [],
"intervalSeconds": uptime_subscription.interval_seconds,
"timeoutMs": uptime_subscription.timeout_ms,
"owner": None,
"traceSampling": False,
"recoveryThreshold": detector.config["recovery_threshold"],
"downtimeThreshold": detector.config["downtime_threshold"],
}
def test_default_name(self) -> None:
"""
Right now no monitors have names. Once we name everything we can remove this
"""
detector = self.create_uptime_detector(name="")
uptime_subscription = get_uptime_subscription(detector)
result = serialize(detector, serializer=UptimeDetectorSerializer())
assert result == {
"id": str(detector.id),
"projectSlug": self.project.slug,
"name": f"Uptime Monitoring for {uptime_subscription.url}",
"environment": detector.config.get("environment"),
"status": "active",
"uptimeStatus": 1, # UptimeStatus.OK from detector state
"mode": detector.config.get("mode", 1),
"url": uptime_subscription.url,
"method": uptime_subscription.method,
"body": uptime_subscription.body,
"headers": [],
"intervalSeconds": uptime_subscription.interval_seconds,
"timeoutMs": uptime_subscription.timeout_ms,
"owner": None,
"traceSampling": False,
"recoveryThreshold": detector.config["recovery_threshold"],
"downtimeThreshold": detector.config["downtime_threshold"],
}
def test_owner(self) -> None:
detector = self.create_uptime_detector(owner=self.user)
uptime_subscription = get_uptime_subscription(detector)
result = serialize(detector, serializer=UptimeDetectorSerializer())
assert result == {
"id": str(detector.id),
"projectSlug": self.project.slug,
"name": detector.name,
"environment": detector.config.get("environment"),
"status": "active",
"uptimeStatus": 1, # UptimeStatus.OK from detector state
"mode": detector.config.get("mode", 1),
"url": uptime_subscription.url,
"method": uptime_subscription.method,
"body": uptime_subscription.body,
"headers": [],
"intervalSeconds": uptime_subscription.interval_seconds,
"timeoutMs": uptime_subscription.timeout_ms,
"owner": {
"email": self.user.email,
"id": str(self.user.id),
"name": self.user.get_username(),
"type": "user",
},
"traceSampling": False,
"recoveryThreshold": detector.config["recovery_threshold"],
"downtimeThreshold": detector.config["downtime_threshold"],
}
def test_trace_sampling(self) -> None:
uptime_subscription = self.create_uptime_subscription(trace_sampling=True)
detector = self.create_uptime_detector(uptime_subscription=uptime_subscription)
result = serialize(detector, serializer=UptimeDetectorSerializer())
assert result["traceSampling"] is True
def test_custom_thresholds(self) -> None:
"""Test that custom threshold values are properly serialized."""
detector = self.create_uptime_detector(recovery_threshold=2, downtime_threshold=5)
result = serialize(detector, serializer=UptimeDetectorSerializer())
assert result["recoveryThreshold"] == 2
assert result["downtimeThreshold"] == 5
def test_bulk_detector_id_lookup(self) -> None:
"""Test that detector IDs are properly included when serializing multiple monitors."""
# Create multiple monitors
detectors = [
self.create_uptime_detector(name="Monitor 1"),
self.create_uptime_detector(name="Monitor 2"),
self.create_uptime_detector(name="Monitor 3"),
]
# Get the detectors and serialize them
results = serialize(detectors, serializer=UptimeDetectorSerializer())
# Verify each has the correct ID
for i, result in enumerate(results):
assert result["id"] == str(detectors[i].id)
assert result["name"] == detectors[i].name
| UptimeDetectorSerializerTest |
python | sanic-org__sanic | sanic/handlers/content_range.py | {
"start": 279,
"end": 2534
} | class ____(Range):
"""Parse and process the incoming request headers to extract the content range information.
Args:
request (Request): The incoming request object.
stats (os.stat_result): The stats of the file being served.
""" # noqa: E501
__slots__ = ("start", "end", "size", "total", "headers")
def __init__(self, request: Request, stats: os.stat_result) -> None:
self.total = stats.st_size
_range = request.headers.getone("range", None)
if _range is None:
raise HeaderNotFound("Range Header Not Found")
unit, _, value = tuple(map(str.strip, _range.partition("=")))
if unit != "bytes":
raise InvalidRangeType(
"{} is not a valid Range Type".format(unit), self
)
start_b, _, end_b = tuple(map(str.strip, value.partition("-")))
try:
self.start = int(start_b) if start_b else None
except ValueError:
raise RangeNotSatisfiable(
"'{}' is invalid for Content Range".format(start_b), self
)
try:
self.end = int(end_b) if end_b else None
except ValueError:
raise RangeNotSatisfiable(
"'{}' is invalid for Content Range".format(end_b), self
)
if self.end is None:
if self.start is None:
raise RangeNotSatisfiable(
"Invalid for Content Range parameters", self
)
else:
# this case represents `Content-Range: bytes 5-`
self.end = self.total - 1
else:
if self.start is None:
# this case represents `Content-Range: bytes -5`
self.start = self.total - self.end
self.end = self.total - 1
if self.start > self.end:
raise RangeNotSatisfiable(
"Invalid for Content Range parameters", self
)
self.size = self.end - self.start + 1
self.headers = {
"Content-Range": "bytes %s-%s/%s"
% (self.start, self.end, self.total)
}
def __bool__(self):
return hasattr(self, "size") and self.size > 0
| ContentRangeHandler |
python | ionelmc__pytest-benchmark | tests/test_elasticsearch_storage.py | {
"start": 1380,
"end": 1527
} | class ____(BytesIO):
def close(self):
value = self.getvalue()
super().close()
self.getvalue = lambda: value
| LooseFileLike |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modular_qwen3_vl.py | {
"start": 21083,
"end": 21367
} | class ____(Qwen2VLPreTrainedModel):
config: Qwen3VLConfig
_no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
_can_record_outputs = {
"hidden_states": Qwen3VLTextDecoderLayer,
"attentions": Qwen3VLTextAttention,
}
| Qwen3VLPreTrainedModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_budget_stream.py | {
"start": 178,
"end": 2228
} | class ____(TestBulkStream):
stream_name = "budget"
account_id = "180535609"
cursor_field = "Modified Time"
download_entity = "Budgets"
def test_return_records_from_given_csv_file(self):
self.mock_apis(file=self.stream_name)
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, "budget")
assert len(output.records) == 1
def test_return_zero_record_from_empty_csv(self):
self.mock_apis(file="budget_empty")
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, "budget_empty")
assert len(output.records) == 0
def test_transform_records(self):
self.mock_apis(file=self.stream_name)
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, "budget")
assert output.records
for record in output.records:
assert "Account Id" in record.record.data.keys()
assert isinstance(record.record.data["Account Id"], int)
@freeze_time("2024-02-26")
def test_incremental_read_cursor_value_matches_value_from_most_recent_record(self):
self.mock_apis(file="budget_with_cursor_value")
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, "budget_with_cursor_value")
assert len(output.records) == 8
assert output.most_recent_state.stream_state.states[0]["cursor"] == {self.cursor_field: "2024-01-01T12:54:12.028+0000"}
@freeze_time("2024-02-26") # mock current time as stream data available for 30 days only
def test_incremental_read_with_state(self):
self.mock_apis(file="budget_with_state", read_with_state=True)
state = self._state("budget_state", self.stream_name)
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, "budget_with_state", state)
assert len(output.records) == 8
assert output.most_recent_state.stream_state.states[0]["cursor"] == {self.cursor_field: "2024-01-30T12:54:12.028+0000"}
| TestBudgetStream |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/tensor_pointwise.py | {
"start": 5688,
"end": 7653
} | class ____(Operator):
"""Operator for torch.cumsum (cumulative sum along a dimension)."""
def __init__(self):
super().__init__("cumsum")
@property
def torch_op_name(self) -> str:
return "torch.cumsum"
def can_produce(self, output_spec: Spec) -> bool:
"""Cumsum can produce tensors but not scalars."""
if isinstance(output_spec, TensorSpec) and output_spec.dtype == torch.bool:
return False
# Cumsum needs at least 1 dimension
if isinstance(output_spec, TensorSpec) and len(output_spec.size) == 0:
return False
return isinstance(output_spec, TensorSpec)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for cumsum operation.
Cumsum takes an input tensor with same shape and dtype as output.
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("CumsumOperator can only produce TensorSpec outputs")
return [
TensorSpec(
size=output_spec.size,
stride=output_spec.stride,
dtype=output_spec.dtype,
)
]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for cumsum operation."""
if len(input_names) != 1:
raise ValueError("Cumsum requires exactly 1 input tensor")
if not isinstance(output_spec, TensorSpec):
raise ValueError("Output spec must be a TensorSpec")
input_name = input_names[0]
# Choose a random valid dimension
num_dims = len(output_spec.size)
if num_dims == 0:
raise ValueError("Cumsum requires tensor with at least 1 dimension")
# Pick a random dimension index
dim = random.randint(0, num_dims - 1)
return f"{output_name} = torch.cumsum({input_name}, dim={dim})"
| CumsumOperator |
python | tensorflow__tensorflow | tensorflow/python/ops/clustering_ops_test.py | {
"start": 5111,
"end": 7473
} | class ____(test.TestCase):
def setUp(self):
num_points = 1000
num_centers = 2000
num_dim = 100
max_k = 5
# Construct a small number of random points and later tile them.
points_per_tile = 10
assert num_points % points_per_tile == 0
points = np.random.standard_normal(
[points_per_tile, num_dim]).astype(np.float32)
# Construct random centers.
self._centers = np.random.standard_normal(
[num_centers, num_dim]).astype(np.float32)
# Exhaustively compute expected nearest neighbors.
def squared_distance(x, y):
return np.linalg.norm(x - y, ord=2)**2
nearest_neighbors = [
sorted([(squared_distance(point, self._centers[j]), j)
for j in range(num_centers)])[:max_k] for point in points
]
expected_nearest_neighbor_indices = np.array(
[[i for _, i in nn] for nn in nearest_neighbors])
expected_nearest_neighbor_squared_distances = np.array(
[[dist for dist, _ in nn] for nn in nearest_neighbors])
# Tile points and expected results to reach requested size (num_points)
(self._points, self._expected_nearest_neighbor_indices,
self._expected_nearest_neighbor_squared_distances) = (
np.tile(x, (int(num_points / points_per_tile), 1))
for x in (points, expected_nearest_neighbor_indices,
expected_nearest_neighbor_squared_distances))
def testNearest1(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(
indices,
self._expected_nearest_neighbor_indices[:, [0]])
self.assertAllClose(
distances,
self._expected_nearest_neighbor_squared_distances[:, [0]])
def testNearest5(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 5)
self.assertAllClose(
indices,
self._expected_nearest_neighbor_indices[:, 0:5])
self.assertAllClose(
distances,
self._expected_nearest_neighbor_squared_distances[:, 0:5])
if __name__ == "__main__":
np.random.seed(0)
test.main()
| NearestCentersLargeTest |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 35110,
"end": 36170
} | class ____(Operation):
def __init__(self, dtype=None, *, name=None):
super().__init__(name=name)
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
def call(self, x):
return backend.numpy.array(x, dtype=self.dtype)
def compute_output_spec(self, x, dtype=None):
dtype = (
backend.standardize_dtype(x.dtype)
if self.dtype is None
else self.dtype
)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.array", "keras.ops.numpy.array"])
def array(x, dtype=None):
"""Create a tensor.
Args:
x: Input tensor.
dtype: The desired data-type for the tensor.
Returns:
A tensor.
Examples:
>>> keras.ops.array([1, 2, 3])
array([1, 2, 3], dtype=int32)
>>> keras.ops.array([1, 2, 3], dtype="float32")
array([1., 2., 3.], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Array(dtype=dtype).symbolic_call(x)
return backend.numpy.array(x, dtype=dtype)
| Array |
python | sphinx-doc__sphinx | sphinx/builders/linkcheck.py | {
"start": 9884,
"end": 9983
} | class ____(NamedTuple):
uri: str
docname: str
docpath: _StrPath
lineno: int
| Hyperlink |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/code_location.py | {
"start": 24239,
"end": 43034
} | class ____(CodeLocation):
def __init__(
self,
origin: CodeLocationOrigin,
instance: DagsterInstance,
host: Optional[str] = None,
port: Optional[int] = None,
socket: Optional[str] = None,
heartbeat: Optional[bool] = False,
watch_server: Optional[bool] = True,
grpc_server_registry: Optional[GrpcServerRegistry] = None,
grpc_metadata: Optional[Sequence[tuple[str, str]]] = None,
):
from dagster._api.get_server_id import sync_get_server_id
from dagster._api.list_repositories import sync_list_repositories_grpc
from dagster._api.snapshot_repository import sync_get_external_repositories_data_grpc
from dagster._grpc.client import DagsterGrpcClient, client_heartbeat_thread
self._origin = check.inst_param(origin, "origin", CodeLocationOrigin)
self._instance = instance
self.grpc_server_registry = check.opt_inst_param(
grpc_server_registry, "grpc_server_registry", GrpcServerRegistry
)
if isinstance(self.origin, GrpcServerCodeLocationOrigin):
self._port = self.origin.port
self._socket = self.origin.socket
self._host = self.origin.host
self._use_ssl = bool(self.origin.use_ssl)
else:
self._port = check.opt_int_param(port, "port")
self._socket = check.opt_str_param(socket, "socket")
self._host = check.str_param(host, "host")
self._use_ssl = False
self._heartbeat_shutdown_event = None
self._heartbeat_thread = None
self._heartbeat = check.bool_param(heartbeat, "heartbeat")
self._watch_server = check.bool_param(watch_server, "watch_server")
self._server_id = None
self._executable_path = None
self._container_image = None
self._container_context = None
self._repository_code_pointer_dict = None
self._entry_point = None
try:
self.client = DagsterGrpcClient(
port=self._port,
socket=self._socket,
host=self._host,
use_ssl=self._use_ssl,
metadata=grpc_metadata,
)
list_repositories_response = sync_list_repositories_grpc(self.client)
self._server_id = sync_get_server_id(self.client)
self.repository_names = set(
symbol.repository_name for symbol in list_repositories_response.repository_symbols
)
if self._heartbeat:
self._heartbeat_shutdown_event = threading.Event()
self._heartbeat_thread = threading.Thread(
target=client_heartbeat_thread,
args=(
self.client,
self._heartbeat_shutdown_event,
),
name="grpc-client-heartbeat",
daemon=True,
)
self._heartbeat_thread.start()
self._executable_path = list_repositories_response.executable_path
self._repository_code_pointer_dict = (
list_repositories_response.repository_code_pointer_dict
)
self._entry_point = list_repositories_response.entry_point
self._dagster_library_versions = list_repositories_response.dagster_library_versions
self._container_image = (
list_repositories_response.container_image
or self._reload_current_image() # Back-compat for older gRPC servers that did not include container_image in ListRepositoriesResponse
)
self._container_context = list_repositories_response.container_context
self._job_snaps_by_name = defaultdict(dict)
self._job_snaps_by_snapshot_id = {}
self.remote_repositories = {}
for repo_name, (repo_data, job_snaps) in sync_get_external_repositories_data_grpc(
self.client,
self,
defer_snapshots=True,
).items():
self.remote_repositories[repo_name] = RemoteRepository(
repo_data,
RepositoryHandle.from_location(
repository_name=repo_name,
code_location=self,
),
auto_materialize_use_sensors=instance.auto_materialize_use_sensors,
ref_to_data_fn=partial(self._job_ref_to_snap, repo_name),
)
for job_snap in job_snaps.values():
self._job_snaps_by_name[repo_name][job_snap.name] = job_snap
self._job_snaps_by_snapshot_id[job_snap.snapshot_id] = job_snap
except:
self.cleanup()
raise
def _job_ref_to_snap(self, repository_name: str, job_ref: "JobRefSnap") -> "JobDataSnap":
from dagster._core.remote_representation.external_data import JobDataSnap
# key by name to ensure that we are resilient to snapshot ID instability
snapshot = self._job_snaps_by_name[repository_name][job_ref.name]
parent_snapshot = (
self._job_snaps_by_snapshot_id.get(job_ref.parent_snapshot_id)
if job_ref.parent_snapshot_id
else None
)
return JobDataSnap(
name=job_ref.name,
job=snapshot,
parent_job=parent_snapshot,
active_presets=job_ref.active_presets,
)
@property
def server_id(self) -> str:
return check.not_none(self._server_id)
@property
def origin(self) -> CodeLocationOrigin:
return self._origin
@property
def container_image(self) -> str:
return cast("str", self._container_image)
@cached_property
def container_context(self) -> Optional[Mapping[str, Any]]:
return self._container_context
@property
def repository_code_pointer_dict(self) -> Mapping[str, Optional[CodePointer]]:
return cast("Mapping[str, Optional[CodePointer]]", self._repository_code_pointer_dict)
@property
def executable_path(self) -> Optional[str]:
return self._executable_path
@property
def entry_point(self) -> Optional[Sequence[str]]:
return self._entry_point
@property
def port(self) -> Optional[int]:
return self._port
@property
def socket(self) -> Optional[str]:
return self._socket
@property
def host(self) -> str:
return self._host
@property
def use_ssl(self) -> bool:
return self._use_ssl
def _reload_current_image(self) -> Optional[str]:
return deserialize_value(
self.client.get_current_image(),
GetCurrentImageResult,
).current_image
def get_current_runs(self) -> Sequence[str]:
return deserialize_value(self.client.get_current_runs(), GetCurrentRunsResult).current_runs
def cleanup(self) -> None:
if self._heartbeat_shutdown_event:
self._heartbeat_shutdown_event.set()
self._heartbeat_shutdown_event = None
if self._heartbeat_thread:
self._heartbeat_thread.join()
self._heartbeat_thread = None
@property
def is_reload_supported(self) -> bool:
return True
def get_repository(self, name: str) -> RemoteRepository:
check.str_param(name, "name")
return self.get_repositories()[name]
def has_repository(self, name: str) -> bool:
return name in self.get_repositories()
def get_repositories(self) -> Mapping[str, RemoteRepository]:
return self.remote_repositories
def get_execution_plan(
self,
remote_job: RemoteJob,
run_config: Mapping[str, Any],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
instance: Optional[DagsterInstance] = None,
) -> RemoteExecutionPlan:
from dagster._api.snapshot_execution_plan import sync_get_external_execution_plan_grpc
check.inst_param(remote_job, "remote_job", RemoteJob)
run_config = check.mapping_param(run_config, "run_config")
check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)
check.opt_inst_param(known_state, "known_state", KnownExecutionState)
check.opt_inst_param(instance, "instance", DagsterInstance)
asset_selection = (
frozenset(check.opt_set_param(remote_job.asset_selection, "asset_selection"))
if remote_job.asset_selection is not None
else None
)
asset_check_selection = (
frozenset(
check.opt_set_param(remote_job.asset_check_selection, "asset_check_selection")
)
if remote_job.asset_check_selection is not None
else None
)
execution_plan_snapshot_or_error = sync_get_external_execution_plan_grpc(
api_client=self.client,
job_origin=remote_job.get_remote_origin(),
run_config=run_config,
job_snapshot_id=remote_job.identifying_job_snapshot_id,
asset_selection=asset_selection,
asset_check_selection=asset_check_selection,
op_selection=remote_job.op_selection,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=instance,
)
return RemoteExecutionPlan(execution_plan_snapshot=execution_plan_snapshot_or_error)
@checked
async def gen_execution_plan(
self,
remote_job: RemoteJob,
run_config: Mapping[str, Any],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
instance: Optional[DagsterInstance] = None,
) -> RemoteExecutionPlan:
from dagster._api.snapshot_execution_plan import gen_external_execution_plan_grpc
asset_selection = (
frozenset(check.opt_set_param(remote_job.asset_selection, "asset_selection"))
if remote_job.asset_selection is not None
else None
)
asset_check_selection = (
frozenset(
check.opt_set_param(remote_job.asset_check_selection, "asset_check_selection")
)
if remote_job.asset_check_selection is not None
else None
)
execution_plan_snapshot_or_error = await gen_external_execution_plan_grpc(
api_client=self.client,
job_origin=remote_job.get_remote_origin(),
run_config=run_config,
job_snapshot_id=remote_job.identifying_job_snapshot_id,
asset_selection=asset_selection,
asset_check_selection=asset_check_selection,
op_selection=remote_job.op_selection,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=instance,
)
return RemoteExecutionPlan(execution_plan_snapshot=execution_plan_snapshot_or_error)
def _get_subset_remote_job_result(
self, selector: JobSubsetSelector, get_full_job: Callable[[JobSubsetSelector], RemoteJob]
) -> RemoteJobSubsetResult:
from dagster._api.snapshot_job import sync_get_external_job_subset_grpc
check.inst_param(selector, "selector", JobSubsetSelector)
check.invariant(
selector.location_name == self.name,
f"PipelineSelector location_name mismatch, got {selector.location_name} expected"
f" {self.name}",
)
remote_repository = self.get_repository(selector.repository_name)
job_handle = JobHandle(selector.job_name, remote_repository.handle)
subset = sync_get_external_job_subset_grpc(
self.client,
job_handle.get_remote_origin(),
include_parent_snapshot=False,
op_selection=selector.op_selection,
asset_selection=selector.asset_selection,
asset_check_selection=selector.asset_check_selection,
)
# Omit the parent job snapshot for __ASSET_JOB, since it is potentialy very large
# and unlikely to be useful (unlike subset selections of other jobs)
if subset.job_data_snap and not is_implicit_asset_job_name(selector.job_name):
full_job = get_full_job(selector)
subset = copy(
subset,
job_data_snap=copy(subset.job_data_snap, parent_job=full_job.job_snapshot),
)
return subset
async def _gen_subset_remote_job_result(
self, selector: JobSubsetSelector, get_full_job: Callable[[JobSubsetSelector], RemoteJob]
) -> "RemoteJobSubsetResult":
from dagster._api.snapshot_job import gen_external_job_subset_grpc
check.inst_param(selector, "selector", JobSubsetSelector)
check.invariant(
selector.location_name == self.name,
f"PipelineSelector location_name mismatch, got {selector.location_name} expected"
f" {self.name}",
)
remote_repository = self.get_repository(selector.repository_name)
job_handle = JobHandle(selector.job_name, remote_repository.handle)
subset = await gen_external_job_subset_grpc(
self.client,
job_handle.get_remote_origin(),
include_parent_snapshot=False,
op_selection=selector.op_selection,
asset_selection=selector.asset_selection,
asset_check_selection=selector.asset_check_selection,
)
if subset.job_data_snap:
full_job = get_full_job(selector)
subset = copy(
subset,
job_data_snap=copy(subset.job_data_snap, parent_job=full_job.job_snapshot),
)
return subset
def get_partition_config(
self,
repository_handle: RepositoryHandle,
job_name: str,
partition_name: str,
instance: DagsterInstance,
) -> "PartitionConfigSnap":
from dagster._api.snapshot_partition import sync_get_external_partition_config_grpc
check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
check.str_param(job_name, "job_name")
check.str_param(partition_name, "partition_name")
return sync_get_external_partition_config_grpc(
self.client, repository_handle, job_name, partition_name, instance
)
def get_partition_tags_from_repo(
self,
repository_handle: RepositoryHandle,
job_name: str,
partition_name: str,
instance: DagsterInstance,
) -> "PartitionTagsSnap":
from dagster._api.snapshot_partition import sync_get_external_partition_tags_grpc
check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
check.str_param(job_name, "job_name")
check.str_param(partition_name, "partition_name")
return sync_get_external_partition_tags_grpc(
self.client, repository_handle, job_name, partition_name, instance
)
def get_partition_names_from_repo(
self, repository_handle: RepositoryHandle, job_name: str
) -> Union[PartitionNamesSnap, "PartitionExecutionErrorSnap"]:
from dagster._api.snapshot_partition import sync_get_external_partition_names_grpc
return sync_get_external_partition_names_grpc(self.client, repository_handle, job_name)
def get_schedule_execution_data(
self,
instance: DagsterInstance,
repository_handle: RepositoryHandle,
schedule_name: str,
scheduled_execution_time: Optional[TimestampWithTimezone],
log_key: Optional[Sequence[str]],
) -> "ScheduleExecutionData":
from dagster._api.snapshot_schedule import sync_get_external_schedule_execution_data_grpc
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
check.str_param(schedule_name, "schedule_name")
check.opt_inst_param(
scheduled_execution_time, "scheduled_execution_time", TimestampWithTimezone
)
check.opt_list_param(log_key, "log_key", of_type=str)
return sync_get_external_schedule_execution_data_grpc(
self.client,
instance,
repository_handle,
schedule_name,
scheduled_execution_time,
log_key,
)
def get_sensor_execution_data(
self,
instance: DagsterInstance,
repository_handle: RepositoryHandle,
name: str,
last_tick_completion_time: Optional[float],
last_run_key: Optional[str],
cursor: Optional[str],
log_key: Optional[Sequence[str]],
last_sensor_start_time: Optional[float],
) -> "SensorExecutionData":
from dagster._api.snapshot_sensor import sync_get_external_sensor_execution_data_grpc
return sync_get_external_sensor_execution_data_grpc(
self.client,
instance,
repository_handle,
name,
last_tick_completion_time,
last_run_key,
cursor,
log_key,
last_sensor_start_time,
)
def get_partition_set_execution_params(
self,
repository_handle: RepositoryHandle,
partition_set_name: str,
partition_names: Sequence[str],
instance: DagsterInstance,
) -> "PartitionSetExecutionParamSnap":
from dagster._api.snapshot_partition import (
sync_get_external_partition_set_execution_param_data_grpc,
)
check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
check.str_param(partition_set_name, "partition_set_name")
check.sequence_param(partition_names, "partition_names", of_type=str)
return sync_get_external_partition_set_execution_param_data_grpc(
self.client,
repository_handle,
partition_set_name,
partition_names,
instance,
)
def get_notebook_data(self, notebook_path: str) -> bytes:
from dagster._api.notebook_data import sync_get_streaming_external_notebook_data_grpc
check.str_param(notebook_path, "notebook_path")
return sync_get_streaming_external_notebook_data_grpc(self.client, notebook_path)
def get_dagster_library_versions(self) -> Optional[Mapping[str, str]]:
return self._dagster_library_versions
def is_implicit_asset_job_name(job_name: str) -> bool:
return job_name.startswith(IMPLICIT_ASSET_JOB_NAME)
| GrpcServerCodeLocation |
python | getsentry__sentry | src/sentry/monitors/endpoints/organization_monitor_index.py | {
"start": 2257,
"end": 14534
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
# TODO(davidenwang): After this is merged and good to go, make this public
"PUT": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.CRONS
permission_classes = (OrganizationAlertRulePermission,)
@extend_schema(
operation_id="Retrieve Monitors for an Organization",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
OrganizationParams.PROJECT,
GlobalParams.ENVIRONMENT,
MonitorParams.OWNER,
],
responses={
200: inline_sentry_response_serializer("MonitorList", list[MonitorSerializerResponse]),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: AuthenticatedHttpRequest, organization: Organization) -> Response:
"""
Lists monitors, including nested monitor environments. May be filtered to a project or environment.
"""
try:
filter_params = self.get_filter_params(request, organization, date_filter_optional=True)
except NoProjects:
return self.respond([])
queryset = Monitor.objects.filter(
organization_id=organization.id, project_id__in=filter_params["project_id"]
).exclude(
status__in=[
ObjectStatus.PENDING_DELETION,
ObjectStatus.DELETION_IN_PROGRESS,
]
)
query = request.GET.get("query")
owners = request.GET.getlist("owner")
is_asc = request.GET.get("asc", "1") == "1"
sort = request.GET.get("sort", "status")
environments = filter_params.get("environment_objects")
if environments is not None:
environment_ids = [e.id for e in environments]
# use a distinct() filter as queries spanning multiple tables can include duplicates
if request.GET.get("includeNew"):
queryset = queryset.filter(
Q(monitorenvironment__environment_id__in=environment_ids)
| Q(monitorenvironment=None)
).distinct()
else:
queryset = queryset.filter(
monitorenvironment__environment_id__in=environment_ids
).distinct()
else:
environments = list(Environment.objects.filter(organization_id=organization.id))
# sort monitors by top monitor environment, then by latest check-in
monitor_environments_query = MonitorEnvironment.objects.filter(
monitor__id=OuterRef("id"), environment_id__in=[e.id for e in environments]
)
sort_fields = []
if sort == "status":
# Check if all environments are muted by seeing if any unmuted environments exist
has_unmuted_env = monitor_environments_query.filter(is_muted=False)
queryset = queryset.annotate(
environment_status_ordering=Case(
# Sort DISABLED and fully muted monitors to the bottom of the list
When(status=ObjectStatus.DISABLED, then=Value(len(DEFAULT_STATUS_ORDER) + 1)),
When(
Exists(monitor_environments_query) & ~Exists(has_unmuted_env),
then=Value(len(DEFAULT_STATUS_ORDER)),
),
default=Subquery(
monitor_environments_query.annotate(
status_ordering=MONITOR_ENVIRONMENT_ORDERING
)
.order_by("status_ordering")
.values("status_ordering")[:1],
output_field=IntegerField(),
),
)
)
queryset = queryset.annotate(
last_checkin_monitorenvironment=Subquery(
monitor_environments_query.order_by("-last_checkin").values("last_checkin")[:1],
output_field=DateTimeField(),
)
)
sort_fields = ["environment_status_ordering", "-last_checkin_monitorenvironment"]
elif sort == "name":
sort_fields = ["name"]
elif sort == "muted":
# Check if any environments are muted
has_muted_env = monitor_environments_query.filter(is_muted=True)
# Check if all environments are muted
has_unmuted_env = monitor_environments_query.filter(is_muted=False)
queryset = queryset.annotate(
muted_ordering=Case(
# No environments muted (or no environments at all)
When(~Exists(has_muted_env), then=Value(0)),
# Some environments muted (not all)
When(Exists(has_muted_env) & Exists(has_unmuted_env), then=Value(1)),
# All environments muted (and at least one environment exists)
When(
Exists(monitor_environments_query) & ~Exists(has_unmuted_env), then=Value(2)
),
default=0,
),
)
sort_fields = ["muted_ordering", "name"]
if not is_asc:
sort_fields = [flip_sort_direction(sort_field) for sort_field in sort_fields]
if owners:
owners_set = set(owners)
# Remove special values from owners, this can't be parsed as an Actor
include_myteams = "myteams" in owners_set
owners_set.discard("myteams")
include_unassigned = "unassigned" in owners_set
owners_set.discard("unassigned")
actors = [Actor.from_identifier(identifier) for identifier in owners_set]
user_ids = [actor.id for actor in actors if actor.is_user]
team_ids = [actor.id for actor in actors if actor.is_team]
teams = get_teams(
request,
organization,
teams=[*team_ids, *(["myteams"] if include_myteams else [])],
)
team_ids = [team.id for team in teams]
owner_filter = Q(owner_user_id__in=user_ids) | Q(owner_team_id__in=team_ids)
if include_unassigned:
unassigned_filter = Q(owner_user_id=None) & Q(owner_team_id=None)
queryset = queryset.filter(unassigned_filter | owner_filter)
else:
queryset = queryset.filter(owner_filter)
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "query":
text = " ".join(value)
queryset = queryset.filter(
Q(name__icontains=text) | Q(id__iexact=text) | Q(slug__icontains=text)
)
elif key == "id":
queryset = queryset.filter(in_iexact("id", value))
elif key == "name":
queryset = queryset.filter(in_iexact("name", value))
elif key == "status":
try:
queryset = queryset.filter(
monitorenvironment__status__in=map_value_to_constant(
MonitorStatus, value
)
)
except ValueError:
queryset = queryset.none()
else:
queryset = queryset.none()
return self.paginate(
request=request,
queryset=queryset,
order_by=sort_fields,
on_results=lambda x: serialize(
x, request.user, MonitorSerializer(environments=environments)
),
paginator_cls=OffsetPaginator,
)
@extend_schema(
operation_id="Create a Monitor",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
request=MonitorValidator,
responses={
201: MonitorSerializer,
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def post(self, request: AuthenticatedHttpRequest, organization) -> Response:
"""
Create a new monitor.
"""
validator = MonitorValidator(
data=request.data,
context={"organization": organization, "access": request.access, "request": request},
)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
monitor = validator.save()
return self.respond(serialize(monitor, request.user), status=201)
@extend_schema(
operation_id="Bulk Edit Monitors",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
request=MonitorBulkEditValidator,
responses={
200: inline_sentry_response_serializer(
"MonitorBulkEditResponse", MonitorBulkEditResponse
),
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def put(self, request: AuthenticatedHttpRequest, organization) -> Response:
"""
Bulk edit the muted and disabled status of a list of monitors determined by slug
"""
validator = MonitorBulkEditValidator(
data=request.data,
partial=True,
context={
"organization": organization,
"access": request.access,
},
)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = dict(validator.validated_data)
projects = self.get_projects(request, organization, include_all_accessible=True)
project_ids = [project.id for project in projects]
monitor_guids = result.pop("ids", [])
monitors = list(Monitor.objects.filter(guid__in=monitor_guids, project_id__in=project_ids))
status = result.get("status")
# If enabling monitors, ensure we can assign all before moving forward
if status == ObjectStatus.ACTIVE:
assign_result = quotas.backend.check_assign_seats(DataCategory.MONITOR_SEAT, monitors)
if not assign_result.assignable:
return self.respond(assign_result.reason, status=400)
# Extract is_muted to propagate to environments, don't update Monitor directly
is_muted = result.pop("is_muted", None)
updated = []
errored = []
for monitor in monitors:
with transaction.atomic(router.db_for_write(Monitor)):
# Attempt to assign a monitor seat
if status == ObjectStatus.ACTIVE:
outcome = quotas.backend.assign_seat(DataCategory.MONITOR_SEAT, monitor)
if outcome != Outcome.ACCEPTED:
errored.append(monitor)
continue
# Attempt to unassign the monitor seat
if status == ObjectStatus.DISABLED:
quotas.backend.disable_seat(DataCategory.MONITOR_SEAT, monitor)
# Propagate is_muted to all monitor environments
if is_muted is not None:
MonitorEnvironment.objects.filter(monitor_id=monitor.id).update(
is_muted=is_muted
)
if result:
monitor.update(**result)
updated.append(monitor)
self.create_audit_entry(
request=request,
organization=organization,
target_object=monitor.id,
event=audit_log.get_event_id("MONITOR_EDIT"),
data=monitor.get_audit_log_data(),
)
return self.respond(
{
"updated": serialize(list(updated), request.user),
"errored": serialize(list(errored), request.user),
},
)
| OrganizationMonitorIndexEndpoint |
python | huggingface__transformers | tests/models/bert/test_modeling_bert.py | {
"start": 28034,
"end": 30294
} | class ____(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = BertModel.from_pretrained("google-bert/bert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]])
torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@pytest.mark.torch_export_test
def test_export(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
bert_model = "google-bert/bert-base-uncased"
device = "cpu"
attn_implementation = "sdpa"
max_length = 512
tokenizer = AutoTokenizer.from_pretrained(bert_model)
inputs = tokenizer(
"the man worked as a [MASK].",
return_tensors="pt",
padding="max_length",
max_length=max_length,
)
model = BertForMaskedLM.from_pretrained(
bert_model,
device_map=device,
attn_implementation=attn_implementation,
use_cache=True,
)
logits = model(**inputs).logits
eg_predicted_mask = tokenizer.decode(logits[0, 6].topk(5).indices)
self.assertEqual(eg_predicted_mask.split(), ["carpenter", "waiter", "barber", "mechanic", "salesman"])
exported_program = torch.export.export(
model,
args=(inputs["input_ids"],),
kwargs={"attention_mask": inputs["attention_mask"]},
strict=True,
)
result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
ep_predicted_mask = tokenizer.decode(result.logits[0, 6].topk(5).indices)
self.assertEqual(eg_predicted_mask, ep_predicted_mask)
| BertModelIntegrationTest |
python | wandb__wandb | wandb/sdk/internal/progress.py | {
"start": 283,
"end": 2289
} | class ____:
"""A helper class for displaying progress."""
ITER_BYTES = 1024 * 1024
def __init__(
self, file: IO[bytes], callback: Optional["ProgressFn"] = None
) -> None:
self.file = file
if callback is None:
def callback_(new_bytes: int, total_bytes: int) -> None:
pass
callback = callback_
self.callback: ProgressFn = callback
self.bytes_read = 0
self.len = os.fstat(file.fileno()).st_size
def read(self, size=-1):
"""Read bytes and call the callback."""
bites = self.file.read(size)
self.bytes_read += len(bites)
if not bites and self.bytes_read < self.len:
# Files shrinking during uploads causes request timeouts. Maybe
# we could avoid those by updating the self.len in real-time, but
# files getting truncated while uploading seems like something
# that shouldn't really be happening anyway.
raise CommError(
f"File {self.file.name} size shrank from {self.len} to {self.bytes_read} while it was being uploaded."
)
# Growing files are also likely to be bad, but our code didn't break
# on those in the past, so it's riskier to make that an error now.
self.callback(len(bites), self.bytes_read)
return bites
def rewind(self) -> None:
self.callback(-self.bytes_read, 0)
self.bytes_read = 0
self.file.seek(0)
def __getattr__(self, name):
"""Fallback to the file object for attrs not defined here."""
if hasattr(self.file, name):
return getattr(self.file, name)
else:
raise AttributeError
def __iter__(self):
return self
def __next__(self):
bites = self.read(self.ITER_BYTES)
if len(bites) == 0:
raise StopIteration
return bites
def __len__(self):
return self.len
next = __next__
| Progress |
python | numba__numba | numba/testing/main.py | {
"start": 19833,
"end": 22642
} | class ____(runner.TextTestResult):
warmup = 3
repetitions = 6
def _huntLeaks(self, test):
self.stream.flush()
repcount = self.repetitions
nwarmup = self.warmup
rc_deltas = [0] * (repcount - nwarmup)
alloc_deltas = [0] * (repcount - nwarmup)
# Preallocate ints likely to be stored in rc_deltas and alloc_deltas,
# to make sys.getallocatedblocks() less flaky.
_int_pool = IntPool()
for i in range(-200, 200):
_int_pool[i]
for i in range(repcount):
# Use a pristine, silent result object to avoid recursion
res = result.TestResult()
test.run(res)
# Poorly-written tests may fail when run several times.
# In this case, abort the refleak run and report the failure.
if not res.wasSuccessful():
self.failures.extend(res.failures)
self.errors.extend(res.errors)
raise AssertionError
del res
alloc_after, rc_after = _refleak_cleanup()
if i >= nwarmup:
rc_deltas[i - nwarmup] = _int_pool[rc_after - rc_before]
alloc_deltas[i - nwarmup] = _int_pool[alloc_after - alloc_before]
alloc_before, rc_before = alloc_after, rc_after
return rc_deltas, alloc_deltas
def addSuccess(self, test):
try:
rc_deltas, alloc_deltas = self._huntLeaks(test)
except AssertionError:
# Test failed when repeated
assert not self.wasSuccessful()
return
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
return any(deltas)
def check_alloc_deltas(deltas):
# At least 1/3rd of 0s
if 3 * deltas.count(0) < len(deltas):
return True
# Nothing else than 1s, 0s and -1s
if not set(deltas) <= set((1, 0, -1)):
return True
return False
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_alloc_deltas)]:
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test, deltas, item_name, sum(deltas))
failed = True
try:
raise ReferenceLeakError(msg)
except Exception:
exc_info = sys.exc_info()
if self.showAll:
self.stream.write("%s = %r " % (item_name, deltas))
self.addFailure(test, exc_info)
if not failed:
super(RefleakTestResult, self).addSuccess(test)
| RefleakTestResult |
python | getsentry__sentry | tests/sentry/sentry_metrics/test_kafka.py | {
"start": 585,
"end": 2665
} | class ____(GenericMetricsTestMixIn, TestCase):
@pytest.mark.django_db
@thread_leaks.thread_leak_allowlist(reason="kafka tests", issue=97046)
def test_produce_metrics(self) -> None:
generic_metrics_backend = KafkaMetricsBackend()
# For testing, we are calling close() here because we
# are swapping out the KafkaProducer
# with a LocalProducer, but regardless,
# close() must always be called in order to close
# the backend's KafkaProducer
generic_metrics_backend.close()
my_topic = Topic("my-topic")
clock = Clock()
broker_storage: MemoryMessageStorage[KafkaPayload] = MemoryMessageStorage()
broker: LocalBroker[KafkaPayload] = LocalBroker(broker_storage, clock)
broker.create_topic(my_topic, partitions=1)
generic_metrics_backend.producer = LocalProducer(broker)
generic_metrics_backend.kafka_topic = my_topic
# produce a counter metric onto the second offset
generic_metrics_backend.counter(
self.use_case_id,
self.org_id,
self.project_id,
self.metric_name,
self.counter_value,
self.metrics_tags,
self.unit,
)
counter_metric = {
"org_id": self.org_id,
"project_id": self.project_id,
"name": self.get_mri(self.metric_name, "c", self.use_case_id, self.unit),
"value": self.counter_value,
"timestamp": int(datetime.now().timestamp()),
"tags": self.metrics_tags,
"retention_days": self.retention_days,
"type": "c",
}
counter_value = json.dumps(counter_metric).encode("utf-8")
produced_message = broker_storage.consume(Partition(my_topic, 0), 0)
assert produced_message is not None
assert produced_message.payload.value == counter_value
# check that there's no other remaining message in the topic
assert broker_storage.consume(Partition(my_topic, 0), 1) is None
| KafkaMetricsInterfaceTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-average-subarray-ii.py | {
"start": 29,
"end": 910
} | class ____(object):
def findMaxAverage(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: float
"""
def getDelta(avg, nums, k):
accu = [0.0] * (len(nums) + 1)
minval_pos = None
delta = 0.0
for i in xrange(len(nums)):
accu[i+1] = nums[i] + accu[i] - avg
if i >= (k-1):
if minval_pos == None or accu[i-k+1] < accu[minval_pos]:
minval_pos = i-k+1
if accu[i+1] - accu[minval_pos] >= 0:
delta = max(delta, (accu[i+1] - accu[minval_pos]) / (i+1 - minval_pos))
return delta
left, delta = min(nums), float("inf")
while delta > 1e-5:
delta = getDelta(left, nums, k)
left += delta
return left
| Solution |
python | streamlit__streamlit | lib/tests/streamlit/cli_util_test.py | {
"start": 789,
"end": 2236
} | class ____(unittest.TestCase):
@parameterized.expand(
[("Linux", False, True), ("Windows", True, False), ("Darwin", False, True)]
)
def test_open_browser(self, os_type, webbrowser_expect, popen_expect):
"""Test web browser opening scenarios."""
from streamlit import env_util
env_util.IS_WINDOWS = os_type == "Windows"
env_util.IS_DARWIN = os_type == "Darwin"
env_util.IS_LINUX_OR_BSD = os_type == "Linux"
with patch("streamlit.env_util.is_executable_in_path", return_value=True):
with patch("webbrowser.open") as webbrowser_open:
with patch("subprocess.Popen") as subprocess_popen:
open_browser("http://some-url")
assert webbrowser_expect == webbrowser_open.called
assert popen_expect == subprocess_popen.called
def test_open_browser_linux_no_xdg(self):
"""Test opening the browser on Linux with no xdg installed"""
from streamlit import env_util
env_util.IS_LINUX_OR_BSD = True
with patch("streamlit.env_util.is_executable_in_path", return_value=False):
with patch("webbrowser.open") as webbrowser_open:
with patch("subprocess.Popen") as subprocess_popen:
open_browser("http://some-url")
assert webbrowser_open.called
assert not subprocess_popen.called
| CliUtilTest |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 58835,
"end": 59961
} | class ____(CBaseTypeNode):
"""
Represents a fused type in a ctypedef statement:
ctypedef cython.fused_type(int, long, long long) integral
name str name of this fused type
types [CSimpleBaseTypeNode] is the list of types to be fused
"""
child_attrs = []
def analyse_declarations(self, env):
type = self.analyse(env)
entry = env.declare_typedef(self.name, type, self.pos)
# Omit the typedef declaration that self.declarator would produce
entry.in_cinclude = True
def analyse(self, env, could_be_name=False):
types = []
for type_node in self.types:
type = type_node.analyse_as_type(env)
if not type:
error(type_node.pos, "Not a type")
continue
if type in types:
error(type_node.pos, "Type specified multiple times")
else:
types.append(type)
# if len(self.types) == 1:
# return types[0]
return PyrexTypes.FusedType(types, name=self.name)
| FusedTypeNode |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 22034,
"end": 22126
} | class ____(TopLevel):
__slots__ = ("args", "returns", "decorator_list", "pos")
| FunctionDef |
python | scrapy__scrapy | scrapy/crawler.py | {
"start": 23392,
"end": 26355
} | class ____(CrawlerProcessBase, CrawlerRunner):
"""
A class to run multiple scrapy crawlers in a process simultaneously.
This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
for starting a :mod:`~twisted.internet.reactor` and handling shutdown
signals, like the keyboard interrupt command Ctrl-C. It also configures
top-level logging.
This utility should be a better fit than
:class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
:mod:`~twisted.internet.reactor` within your application.
The CrawlerProcess object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
:param install_root_handler: whether to install root logging handler
(default: True)
This class shouldn't be needed (since Scrapy is responsible of using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
This class provides Deferred-based APIs. Use :class:`AsyncCrawlerProcess`
for modern coroutine APIs.
"""
def __init__(
self,
settings: dict[str, Any] | Settings | None = None,
install_root_handler: bool = True,
):
super().__init__(settings, install_root_handler)
self._initialized_reactor: bool = False
logger.debug("Using CrawlerProcess")
def _create_crawler(self, spidercls: type[Spider] | str) -> Crawler:
if isinstance(spidercls, str):
spidercls = self.spider_loader.load(spidercls)
init_reactor = not self._initialized_reactor
self._initialized_reactor = True
return Crawler(spidercls, self.settings, init_reactor=init_reactor)
def _stop_dfd(self) -> Deferred[Any]:
return self.stop()
def start(
self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
) -> None:
"""
This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
If ``stop_after_crawl`` is True, the reactor will be stopped after all
crawlers have finished, using :meth:`join`.
:param bool stop_after_crawl: stop or not the reactor when all
crawlers have finished
:param bool install_signal_handlers: whether to install the OS signal
handlers from Twisted and Scrapy (default: True)
"""
from twisted.internet import reactor
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(self._stop_reactor)
self._setup_reactor(install_signal_handlers)
reactor.run(installSignalHandlers=install_signal_handlers) # blocking call
| CrawlerProcess |
python | django-guardian__django-guardian | guardian/models/models.py | {
"start": 4403,
"end": 5431
} | class ____(UserObjectPermissionAbstract):
"""The default implementation of the UserObjectPermissionAbstract model.
If `GUARDIAN_USER_OBJ_PERMS_MODEL` is not set at the beginning of the project, this model will be used.
Uses Django's contenttypes framework to store generic relations.
See Also:
- [Django's Documentation on Abstract Base Models](https://docs.djangoproject.com/en/stable/topics/db/models/#abstract-base-classes)
- [Django-Guardian Performance Tuning](https://django-guardian.readthedocs.io/en/stable/userguide/performance.html)
- [How to override the default UserObjectPermission](https://django-guardian.readthedocs.io/en/stable/configuration.html#guardian-user-obj-perms-model)
"""
class Meta(UserObjectPermissionAbstract.Meta):
abstract = False
indexes = [
models.Index(fields=["permission", "user", "content_type", "object_pk"]),
models.Index(fields=["user", "content_type", "object_pk"]),
]
| UserObjectPermission |
python | great-expectations__great_expectations | great_expectations/experimental/metric_repository/batch_inspector.py | {
"start": 498,
"end": 2308
} | class ____:
"""A BatchInspector is responsible for computing metrics for a batch of data.
It uses MetricRetriever objects to retrieve metrics.
"""
def __init__(self, context: AbstractDataContext, metric_retrievers: list[MetricRetriever]):
self._context = context
self._metric_retrievers = metric_retrievers
def compute_metric_list_run(
self,
data_asset_id: uuid.UUID,
batch_request: BatchRequest,
metric_list: Optional[List[MetricTypes]],
) -> MetricRun:
"""Method that computes a MetricRun for a list of metrics.
Called by GX Agent to compute a MetricRun as part of a RunMetricsEvent.
Args:
data_asset_id (uuid.UUID): current data asset id.
batch_request (BatchRequest): BatchRequest for current batch.
metrics_list (Optional[List[MetricTypes]]): List of metrics to compute.
Returns:
MetricRun: _description_
"""
# TODO: eventually we will keep this and retire `compute_metric_run`.
metrics: list[Metric] = []
for metric_retriever in self._metric_retrievers:
metrics.extend(
metric_retriever.get_metrics(batch_request=batch_request, metric_list=metric_list)
)
return MetricRun(data_asset_id=data_asset_id, metrics=metrics)
def compute_metric_run(
self, data_asset_id: uuid.UUID, batch_request: BatchRequest
) -> MetricRun:
metrics: list[Metric] = []
for metric_retriever in self._metric_retrievers:
metrics.extend(metric_retriever.get_metrics(batch_request=batch_request))
return MetricRun(data_asset_id=data_asset_id, metrics=metrics)
def _generate_run_id(self) -> uuid.UUID:
return uuid.uuid4()
| BatchInspector |
python | realpython__materials | python-all-attribute/shapes_v1.py | {
"start": 29,
"end": 174
} | class ____:
def __init__(self, radius):
self.radius = _validate(radius)
def area(self):
return _pi * self.radius**2
| Circle |
python | psf__black | src/black/mode.py | {
"start": 6727,
"end": 7858
} | class ____(Enum):
"""Individual preview style features."""
# NOTE: string_processing requires wrap_long_dict_values_in_parens
# for https://github.com/psf/black/issues/3117 to be fixed.
string_processing = auto()
hug_parens_with_braces_and_square_brackets = auto()
wrap_long_dict_values_in_parens = auto()
multiline_string_handling = auto()
always_one_newline_after_import = auto()
fix_fmt_skip_in_one_liners = auto()
standardize_type_comments = auto()
wrap_comprehension_in = auto()
# Remove parentheses around multiple exception types in except and
# except* without as. See PEP 758 for details.
remove_parens_around_except_types = auto()
normalize_cr_newlines = auto()
fix_module_docstring_detection = auto()
fix_type_expansion_split = auto()
remove_parens_from_assignment_lhs = auto()
UNSTABLE_FEATURES: set[Preview] = {
# Many issues, see summary in https://github.com/psf/black/issues/4042
Preview.string_processing,
# See issue #4036 (crash), #4098, #4099 (proposed tweaks)
Preview.hug_parens_with_braces_and_square_brackets,
}
| Preview |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_landscape01.py | {
"start": 315,
"end": 906
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("landscape01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "Foo")
worksheet.set_landscape()
worksheet.set_paper(9)
worksheet.vertical_dpi = 200
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ray-project__ray | python/ray/data/_internal/iterator/iterator_impl.py | {
"start": 357,
"end": 1182
} | class ____(DataIterator):
def __init__(
self,
base_dataset: "Dataset",
):
self._base_dataset = base_dataset
def __repr__(self) -> str:
return f"DataIterator({self._base_dataset})"
def _to_ref_bundle_iterator(
self,
) -> Tuple[Iterator[RefBundle], Optional[DatasetStats], bool]:
ref_bundles_iterator, stats = self._base_dataset._execute_to_iterator()
return ref_bundles_iterator, stats, False
def stats(self) -> str:
return self._base_dataset.stats()
def schema(self) -> Union[type, "pyarrow.lib.Schema"]:
return self._base_dataset.schema()
def get_context(self) -> DataContext:
return self._base_dataset.context
def _get_dataset_tag(self):
return self._base_dataset.get_dataset_id()
| DataIteratorImpl |
python | jazzband__django-pipeline | pipeline/conf.py | {
"start": 2342,
"end": 3380
} | class ____(MutableMapping):
"""
Container object for pipeline settings
"""
def __init__(self, wrapped_settings):
self.settings = DEFAULTS.copy()
self.settings.update(wrapped_settings)
def __getitem__(self, key):
value = self.settings[key]
if key.endswith(("_BINARY", "_ARGUMENTS")):
if isinstance(value, (str,)):
return tuple(shlex.split(value, posix=(os.name == "posix")))
return tuple(value)
return value
def __setitem__(self, key, value):
self.settings[key] = value
def __delitem__(self, key):
del self.store[key]
def __iter__(self):
return iter(self.settings)
def __len__(self):
return len(self.settings)
def __getattr__(self, name):
return self.__getitem__(name)
settings = PipelineSettings(_settings.PIPELINE)
@receiver(setting_changed)
def reload_settings(**kwargs):
if kwargs["setting"] == "PIPELINE":
settings.update(kwargs["value"])
| PipelineSettings |
python | Textualize__textual | src/textual/widget.py | {
"start": 6373,
"end": 6561
} | class ____(NamedTuple):
"""Stores results of a previous render."""
size: Size
"""The size of the render."""
lines: list[Strip]
"""Contents of the render."""
| _RenderCache |
python | doocs__leetcode | solution/0300-0399/0314.Binary Tree Vertical Order Traversal/Solution2.py | {
"start": 192,
"end": 748
} | class ____:
def verticalOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
if root is None:
return []
q = deque([(root, 0)])
d = defaultdict(list)
while q:
for _ in range(len(q)):
root, offset = q.popleft()
d[offset].append(root.val)
if root.left:
q.append((root.left, offset - 1))
if root.right:
q.append((root.right, offset + 1))
return [v for _, v in sorted(d.items())]
| Solution |
python | ray-project__ray | rllib/execution/buffers/mixin_replay_buffer.py | {
"start": 529,
"end": 6842
} | class ____:
"""This buffer adds replayed samples to a stream of new experiences.
- Any newly added batch (`add()`) is immediately returned upon
the next `replay` call (close to on-policy) as well as being moved
into the buffer.
- Additionally, a certain number of old samples is mixed into the
returned sample according to a given "replay ratio".
- If >1 calls to `add()` are made without any `replay()` calls
in between, all newly added batches are returned (plus some older samples
according to the "replay ratio").
.. testcode::
from ray.rllib.execution.buffers.mixin_replay_buffer import (
MixInMultiAgentReplayBuffer)
from ray.rllib.policy.sample_batch import SampleBatch
# replay ratio 0.66 (2/3 replayed, 1/3 new samples):
buffer = MixInMultiAgentReplayBuffer(capacity=100,
replay_ratio=0.66)
A, B, C = (SampleBatch({"obs": [1]}), SampleBatch({"obs": [2]}),
SampleBatch({"obs": [3]}))
buffer.add(A)
buffer.add(B)
buffer.add(B)
print(buffer.replay()["obs"])
.. testoutput::
:hide:
...
"""
def __init__(
self,
capacity: int,
replay_ratio: float,
replay_mode: ReplayMode = ReplayMode.INDEPENDENT,
):
"""Initializes MixInReplay instance.
Args:
capacity: Number of batches to store in total.
replay_ratio: Ratio of replayed samples in the returned
batches. E.g. a ratio of 0.0 means only return new samples
(no replay), a ratio of 0.5 means always return newest sample
plus one old one (1:1), a ratio of 0.66 means always return
the newest sample plus 2 old (replayed) ones (1:2), etc...
"""
self.capacity = capacity
self.replay_ratio = replay_ratio
self.replay_proportion = None
if self.replay_ratio != 1.0:
self.replay_proportion = self.replay_ratio / (1.0 - self.replay_ratio)
if replay_mode in ["lockstep", ReplayMode.LOCKSTEP]:
self.replay_mode = ReplayMode.LOCKSTEP
elif replay_mode in ["independent", ReplayMode.INDEPENDENT]:
self.replay_mode = ReplayMode.INDEPENDENT
else:
raise ValueError("Unsupported replay mode: {}".format(replay_mode))
def new_buffer():
return SimpleReplayBuffer(num_slots=capacity)
self.replay_buffers = collections.defaultdict(new_buffer)
# Metrics.
self.add_batch_timer = _Timer()
self.replay_timer = _Timer()
self.update_priorities_timer = _Timer()
# Added timesteps over lifetime.
self.num_added = 0
# Last added batch(es).
self.last_added_batches = collections.defaultdict(list)
def add(self, batch: SampleBatchType) -> None:
"""Adds a batch to the appropriate policy's replay buffer.
Turns the batch into a MultiAgentBatch of the DEFAULT_POLICY_ID if
it is not a MultiAgentBatch. Subsequently adds the individual policy
batches to the storage.
Args:
batch: The batch to be added.
"""
# Make a copy so the replay buffer doesn't pin plasma memory.
batch = batch.copy()
batch = batch.as_multi_agent()
with self.add_batch_timer:
if self.replay_mode == ReplayMode.LOCKSTEP:
# Lockstep mode: Store under _ALL_POLICIES key (we will always
# only sample from all policies at the same time).
# This means storing a MultiAgentBatch to the underlying buffer
self.replay_buffers[_ALL_POLICIES].add_batch(batch)
self.last_added_batches[_ALL_POLICIES].append(batch)
else:
# Store independent SampleBatches
for policy_id, sample_batch in batch.policy_batches.items():
self.replay_buffers[policy_id].add_batch(sample_batch)
self.last_added_batches[policy_id].append(sample_batch)
self.num_added += batch.count
def replay(
self, policy_id: PolicyID = DEFAULT_POLICY_ID
) -> Optional[SampleBatchType]:
if self.replay_mode == ReplayMode.LOCKSTEP and policy_id != _ALL_POLICIES:
raise ValueError(
"Trying to sample from single policy's buffer in lockstep "
"mode. In lockstep mode, all policies' experiences are "
"sampled from a single replay buffer which is accessed "
"with the policy id `{}`".format(_ALL_POLICIES)
)
buffer = self.replay_buffers[policy_id]
# Return None, if:
# - Buffer empty or
# - `replay_ratio` < 1.0 (new samples required in returned batch)
# and no new samples to mix with replayed ones.
if len(buffer) == 0 or (
len(self.last_added_batches[policy_id]) == 0 and self.replay_ratio < 1.0
):
return None
# Mix buffer's last added batches with older replayed batches.
with self.replay_timer:
output_batches = self.last_added_batches[policy_id]
self.last_added_batches[policy_id] = []
# No replay desired -> Return here.
if self.replay_ratio == 0.0:
return concat_samples(output_batches)
# Only replay desired -> Return a (replayed) sample from the
# buffer.
elif self.replay_ratio == 1.0:
return buffer.replay()
# Replay ratio = old / [old + new]
# Replay proportion: old / new
num_new = len(output_batches)
replay_proportion = self.replay_proportion
while random.random() < num_new * replay_proportion:
replay_proportion -= 1
output_batches.append(buffer.replay())
return concat_samples(output_batches)
def get_host(self) -> str:
"""Returns the computer's network name.
Returns:
The computer's networks name or an empty string, if the network
name could not be determined.
"""
return platform.node()
| MixInMultiAgentReplayBuffer |
python | sqlalchemy__sqlalchemy | test/orm/test_events.py | {
"start": 73213,
"end": 75432
} | class ____(_fixtures.FixtureTest):
run_inserts = None
def test_attr_propagated(self):
User = self.classes.User
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
class AdminUser(User):
pass
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
fn = Mock()
event.listen(User.name, "set", fn, propagate=True)
au = AdminUser()
au.name = "ed"
eq_(fn.call_count, 1)
event.remove(User.name, "set", fn)
au.name = "jack"
eq_(fn.call_count, 1)
def test_unmapped_listen(self):
users = self.tables.users
class Foo:
pass
fn = Mock()
event.listen(Foo, "before_insert", fn, propagate=True)
class User(Foo):
pass
m = self.mapper_registry.map_imperatively(User, users)
u1 = User()
m.dispatch.before_insert(m, None, attributes.instance_state(u1))
eq_(fn.call_count, 1)
event.remove(Foo, "before_insert", fn)
# existing event is removed
m.dispatch.before_insert(m, None, attributes.instance_state(u1))
eq_(fn.call_count, 1)
# the _HoldEvents is also cleaned out
class Bar(Foo):
pass
m = self.mapper_registry.map_imperatively(Bar, users)
b1 = Bar()
m.dispatch.before_insert(m, None, attributes.instance_state(b1))
eq_(fn.call_count, 1)
def test_instance_event_listen_on_cls_before_map(self):
users = self.tables.users
fn = Mock()
class User:
pass
event.listen(User, "load", fn)
m = self.mapper_registry.map_imperatively(User, users)
u1 = User()
m.class_manager.dispatch.load(u1._sa_instance_state, "u1")
event.remove(User, "load", fn)
m.class_manager.dispatch.load(u1._sa_instance_state, "u2")
eq_(fn.mock_calls, [call(u1, "u1")])
| RemovalTest |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 11856,
"end": 12227
} | class ____(OAuth2Error):
"""
The Authorization Server requires End-User authentication.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface for End-User authentication.
"""
error = 'login_required'
| LoginRequired |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_bigquery_dts.py | {
"start": 1789,
"end": 4763
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.bigquery_dts.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = BiqQueryDataTransferServiceHook()
self.hook.get_credentials = mock.MagicMock(return_value=CREDENTIALS)
def test_disable_auto_scheduling(self):
expected = deepcopy(TRANSFER_CONFIG)
expected.schedule_options.disable_auto_scheduling = True
assert expected == self.hook._disable_auto_scheduling(TRANSFER_CONFIG)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery_dts.DataTransferServiceClient.create_transfer_config"
)
def test_create_transfer_config(self, service_mock):
self.hook.create_transfer_config(transfer_config=TRANSFER_CONFIG, project_id=PROJECT_ID)
parent = f"projects/{PROJECT_ID}"
expected_config = deepcopy(TRANSFER_CONFIG)
expected_config.schedule_options.disable_auto_scheduling = True
service_mock.assert_called_once_with(
request=dict(parent=parent, transfer_config=expected_config, authorization_code=None),
metadata=(),
retry=DEFAULT,
timeout=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery_dts.DataTransferServiceClient.delete_transfer_config"
)
def test_delete_transfer_config(self, service_mock):
self.hook.delete_transfer_config(transfer_config_id=TRANSFER_CONFIG_ID, project_id=PROJECT_ID)
name = f"projects/{PROJECT_ID}/transferConfigs/{TRANSFER_CONFIG_ID}"
service_mock.assert_called_once_with(
request=dict(name=name), metadata=(), retry=DEFAULT, timeout=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery_dts."
"DataTransferServiceClient.start_manual_transfer_runs"
)
def test_start_manual_transfer_runs(self, service_mock):
self.hook.start_manual_transfer_runs(transfer_config_id=TRANSFER_CONFIG_ID, project_id=PROJECT_ID)
parent = f"projects/{PROJECT_ID}/transferConfigs/{TRANSFER_CONFIG_ID}"
service_mock.assert_called_once_with(
request=dict(parent=parent, requested_time_range=None, requested_run_time=None),
metadata=(),
retry=DEFAULT,
timeout=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery_dts.DataTransferServiceClient.get_transfer_run"
)
def test_get_transfer_run(self, service_mock):
self.hook.get_transfer_run(
run_id=RUN_ID, transfer_config_id=TRANSFER_CONFIG_ID, project_id=PROJECT_ID
)
name = f"projects/{PROJECT_ID}/transferConfigs/{TRANSFER_CONFIG_ID}/runs/{RUN_ID}"
service_mock.assert_called_once_with(
request=dict(name=name), metadata=(), retry=DEFAULT, timeout=None
)
| TestBigQueryDataTransferHook |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 30376,
"end": 32079
} | class ____(NestedType):
"""
Variable length list type.
Parameters
----------
inner
The `DataType` of the values within each list.
Examples
--------
>>> df = pl.DataFrame(
... {
... "integer_lists": [[1, 2], [3, 4]],
... "float_lists": [[1.0, 2.0], [3.0, 4.0]],
... }
... )
>>> df
shape: (2, 2)
┌───────────────┬─────────────┐
│ integer_lists ┆ float_lists │
│ --- ┆ --- │
│ list[i64] ┆ list[f64] │
╞═══════════════╪═════════════╡
│ [1, 2] ┆ [1.0, 2.0] │
│ [3, 4] ┆ [3.0, 4.0] │
└───────────────┴─────────────┘
"""
inner: PolarsDataType
def __init__(self, inner: PolarsDataType | PythonDataType) -> None:
self.inner = polars.datatypes.parse_into_dtype(inner)
def __eq__(self, other: PolarsDataType) -> bool: # type: ignore[override]
# This equality check allows comparison of type classes and type instances.
# If a parent type is not specific about its inner type, we infer it as equal:
# > list[i64] == list[i64] -> True
# > list[i64] == list[f32] -> False
# > list[i64] == list -> True
# allow comparing object instances to class
if type(other) is DataTypeClass and issubclass(other, List):
return True
elif isinstance(other, List):
return self.inner == other.inner
else:
return False
def __hash__(self) -> int:
return hash((self.__class__, self.inner))
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}({self.inner!r})"
| List |
python | tensorflow__tensorflow | tensorflow/lite/python/op_hint.py | {
"start": 20132,
"end": 21795
} | class ____:
"""Abstract operand for a tflite hint function._dynamic_rnn_loop.
This is a base class that handles representing arguments to an OpHint.
It also is able to serialize operands to the stubbed graph_def.
Child classes are responsible for being able to
store information about the hint identity operators. They are also responsible
for knowing how to serialize to output graphdefs.
Typically this will be implemented by holding one or more identity nodes
that were previously discovered as hints.
"""
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the node(s) to out_graphdef and returns the input node name.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The output that the stub should use as an input for this operand.
Raises:
RuntimeError: if the method is not implemented.
"""
del out_graphdef
raise RuntimeError("Unimplemented abstract method.")
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""Add node(s) to graph representing output operands and returns type.
Args:
fused_op_name: name of the fused op stub name.
output_index: Output index that we are currently processing from stub.
out_graphdef: The destination graphdef we are currently building up.
Returns:
The datatype of this identity.
Raises:
RuntimeError: if the method is not implemented.
"""
del fused_op_name, output_index, out_graphdef
raise RuntimeError("Unimplemented abstract method.")
| _LiteOperand |
python | numba__numba | numba/core/config.py | {
"start": 1911,
"end": 3495
} | class ____(int):
"""This class holds the "optimisation level" set in `NUMBA_OPT`. As this env
var can be an int or a string, but is almost always interpreted as an int,
this class subclasses int so as to get the common behaviour but stores the
actual value as a `_raw_value` member. The value "max" is a special case
and the property `is_opt_max` can be queried to find if the optimisation
level (supplied value at construction time) is "max"."""
def __new__(cls, *args, **kwargs):
assert len(args) == 1
(value,) = args
_int_value = 3 if value == 'max' else int(value)
# the int ctor is always called with an appropriate integer value
new = super().__new__(cls, _int_value, **kwargs)
# raw value is max or int
new._raw_value = value if value == 'max' else _int_value
return new
@property
def is_opt_max(self):
"""Returns True if the optimisation level is "max" False
otherwise."""
return self._raw_value == "max"
def __repr__(self):
if isinstance(self._raw_value, str):
arg = f"'{self._raw_value}'"
else:
arg = self._raw_value
return f"_OptLevel({arg})"
def _process_opt_level(opt_level):
if opt_level not in ('0', '1', '2', '3', 'max'):
msg = ("Environment variable `NUMBA_OPT` is set to an unsupported "
f"value '{opt_level}', supported values are 0, 1, 2, 3, and "
"'max'")
raise ValueError(msg)
else:
return _OptLevel(opt_level)
| _OptLevel |
python | ipython__ipython | IPython/core/magics/script.py | {
"start": 2443,
"end": 13706
} | class ____(Magics):
"""Magics for talking to scripts
This defines a base `%%script` cell magic for running a cell
with a program in a subprocess, and registers a few top-level
magics that call %%script with common interpreters.
"""
event_loop = Any(
help="""
The event loop on which to run subprocesses
Not the main event loop,
because we want to be able to make blocking calls
and have certain requirements we don't want to impose on the main loop.
"""
)
script_magics: List = List(
help="""Extra script cell magics to define
This generates simple wrappers of `%%script foo` as `%%foo`.
If you want to add script magics that aren't on your path,
specify them in script_paths
""",
).tag(config=True)
@default('script_magics')
def _script_magics_default(self):
"""default to a common list of programs"""
defaults = [
'sh',
'bash',
'perl',
'ruby',
'python',
'python2',
'python3',
'pypy',
]
if os.name == 'nt':
defaults.extend([
'cmd',
])
return defaults
script_paths = Dict(
help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
Only necessary for items in script_magics where the default path will not
find the right interpreter.
"""
).tag(config=True)
def __init__(self, shell=None):
super(ScriptMagics, self).__init__(shell=shell)
self._generate_script_magics()
self.bg_processes = []
atexit.register(self.kill_bg_processes)
def __del__(self):
self.kill_bg_processes()
def _generate_script_magics(self):
cell_magics = self.magics['cell']
for name in self.script_magics:
cell_magics[name] = self._make_script_magic(name)
def _make_script_magic(self, name):
"""make a named magic, that calls %%script with a particular program"""
# expand to explicit path if necessary:
script = self.script_paths.get(name, name)
@magic_arguments.magic_arguments()
@script_args
def named_script_magic(line, cell):
# if line, add it as cl-flags
if line:
line = "%s %s" % (script, line)
else:
line = script
return self.shebang(line, cell)
# write a basic docstring:
named_script_magic.__doc__ = \
"""%%{name} script magic
Run cells with {script} in a subprocess.
This is a shortcut for `%%script {script}`
""".format(**locals())
return named_script_magic
@magic_arguments.magic_arguments()
@script_args
@cell_magic("script")
def shebang(self, line, cell):
"""Run a cell via a shell command
The `%%script` line is like the #! line of script,
specifying a program (bash, perl, ruby, etc.) with which to run.
The rest of the cell is run by that program.
.. versionchanged:: 9.0
Interrupting the script executed without `--bg` will end in
raising an exception (unless `--no-raise-error` is passed).
Examples
--------
::
In [1]: %%script bash
...: for i in 1 2 3; do
...: echo $i
...: done
1
2
3
"""
# Create the event loop in which to run script magics
# this operates on a background thread
if self.event_loop is None:
if sys.platform == "win32":
# don't override the current policy,
# just create an event loop
event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop()
else:
event_loop = asyncio.new_event_loop()
self.event_loop = event_loop
# start the loop in a background thread
asyncio_thread = Thread(target=event_loop.run_forever, daemon=True)
asyncio_thread.start()
else:
event_loop = self.event_loop
def in_thread(coro):
"""Call a coroutine on the asyncio thread"""
return asyncio.run_coroutine_threadsafe(coro, event_loop).result()
async def _readchunk(stream):
try:
return await stream.read(100)
except asyncio.exceptions.IncompleteReadError as e:
return e.partial
except asyncio.exceptions.LimitOverrunError as e:
return await stream.read(e.consumed)
async def _handle_stream(stream, stream_arg, file_object):
should_break = False
decoder = getincrementaldecoder("utf-8")(errors="replace")
while True:
chunk = decoder.decode(await _readchunk(stream))
if not chunk:
break
chunk = decoder.decode("", final=True)
should_break = True
if stream_arg:
self.shell.user_ns[stream_arg] += chunk
else:
file_object.write(chunk)
file_object.flush()
if should_break:
break
async def _stream_communicate(process, cell):
process.stdin.write(cell)
process.stdin.close()
stdout_task = asyncio.create_task(
_handle_stream(process.stdout, args.out, sys.stdout)
)
stderr_task = asyncio.create_task(
_handle_stream(process.stderr, args.err, sys.stderr)
)
await asyncio.wait([stdout_task, stderr_task])
await process.wait()
argv = arg_split(line, posix=not sys.platform.startswith("win"))
args, cmd = self.shebang.parser.parse_known_args(argv)
if args.out:
self.shell.user_ns[args.out] = ""
if args.err:
self.shell.user_ns[args.err] = ""
try:
p = in_thread(
asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE,
)
)
except OSError as e:
if e.errno == errno.ENOENT:
print("Couldn't find program: %r" % cmd[0])
return
else:
raise
if not cell.endswith('\n'):
cell += '\n'
cell = cell.encode('utf8', 'replace')
if args.bg:
self.bg_processes.append(p)
self._gc_bg_processes()
to_close = []
if args.out:
self.shell.user_ns[args.out] = _AsyncIOProxy(p.stdout, event_loop)
else:
to_close.append(p.stdout)
if args.err:
self.shell.user_ns[args.err] = _AsyncIOProxy(p.stderr, event_loop)
else:
to_close.append(p.stderr)
event_loop.call_soon_threadsafe(
lambda: asyncio.Task(self._run_script(p, cell, to_close))
)
if args.proc:
proc_proxy = _AsyncIOProxy(p, event_loop)
proc_proxy.stdout = _AsyncIOProxy(p.stdout, event_loop)
proc_proxy.stderr = _AsyncIOProxy(p.stderr, event_loop)
self.shell.user_ns[args.proc] = proc_proxy
return
try:
in_thread(_stream_communicate(p, cell))
except KeyboardInterrupt:
try:
p.send_signal(signal.SIGINT)
in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
if p.returncode is not None:
print("Process was interrupted.")
if args.raise_error:
raise RaiseAfterInterrupt()
else:
return
p.terminate()
in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
if p.returncode is not None:
print("Process was terminated.")
if args.raise_error:
raise RaiseAfterInterrupt()
else:
return
p.kill()
print("Process was killed.")
if args.raise_error:
raise RaiseAfterInterrupt()
except RaiseAfterInterrupt:
pass
except OSError:
pass
except Exception as e:
print("Error while terminating subprocess (pid=%i): %s" % (p.pid, e))
if args.raise_error:
raise CalledProcessError(p.returncode, cell) from None
else:
return
if args.raise_error and p.returncode != 0:
# If we get here and p.returncode is still None, we must have
# killed it but not yet seen its return code. We don't wait for it,
# in case it's stuck in uninterruptible sleep. -9 = SIGKILL
rc = p.returncode or -9
raise CalledProcessError(rc, cell)
shebang.__skip_doctest__ = os.name != "posix"
async def _run_script(self, p, cell, to_close):
"""callback for running the script in the background"""
p.stdin.write(cell)
await p.stdin.drain()
p.stdin.close()
await p.stdin.wait_closed()
await p.wait()
# asyncio read pipes have no close
# but we should drain the data anyway
for s in to_close:
await s.read()
self._gc_bg_processes()
@line_magic("killbgscripts")
def killbgscripts(self, _nouse_=''):
"""Kill all BG processes started by %%script and its family."""
self.kill_bg_processes()
print("All background processes were killed.")
def kill_bg_processes(self):
"""Kill all BG processes which are still running."""
if not self.bg_processes:
return
for p in self.bg_processes:
if p.returncode is None:
try:
p.send_signal(signal.SIGINT)
except:
pass
time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
for p in self.bg_processes:
if p.returncode is None:
try:
p.terminate()
except:
pass
time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
for p in self.bg_processes:
if p.returncode is None:
try:
p.kill()
except:
pass
self._gc_bg_processes()
def _gc_bg_processes(self):
self.bg_processes = [p for p in self.bg_processes if p.returncode is None]
| ScriptMagics |
python | scipy__scipy | scipy/optimize/tests/test_least_squares.py | {
"start": 18951,
"end": 23517
} | class ____:
def test_exact_tr_solver(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
tr_solver='exact', method=self.method)
assert_raises(ValueError, least_squares, p.fun, p.x0,
tr_solver='exact', jac_sparsity=p.sparsity,
method=self.method)
def test_equivalence(self):
sparse = BroydenTridiagonal(mode='sparse')
dense = BroydenTridiagonal(mode='dense')
res_sparse = least_squares(
sparse.fun, sparse.x0, jac=sparse.jac,
method=self.method)
res_dense = least_squares(
dense.fun, dense.x0, jac=sparse.jac,
method=self.method)
assert_equal(res_sparse.nfev, res_dense.nfev)
assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
def test_tr_options(self):
p = BroydenTridiagonal()
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
tr_options={'btol': 1e-10})
assert_allclose(res.cost, 0, atol=1e-20)
def test_wrong_parameters(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
tr_solver='best', method=self.method)
assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
tr_solver='lsmr', tr_options={'tol': 1e-10})
def test_solver_selection(self):
sparse = BroydenTridiagonal(mode='sparse')
dense = BroydenTridiagonal(mode='dense')
res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
method=self.method)
res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
method=self.method)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
assert_(issparse(res_sparse.jac))
assert_(isinstance(res_dense.jac, np.ndarray))
def test_numerical_jac(self):
p = BroydenTridiagonal()
for jac in ['2-point', '3-point', 'cs']:
res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
res_sparse = least_squares(
p.fun, p.x0, jac,method=self.method,
jac_sparsity=p.sparsity)
assert_equal(res_dense.nfev, res_sparse.nfev)
assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
@pytest.mark.fail_slow(10)
def test_with_bounds(self):
p = BroydenTridiagonal()
for jac, jac_sparsity in product(
[p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
res_1 = least_squares(
p.fun, p.x0, jac, bounds=(p.lb, np.inf),
method=self.method,jac_sparsity=jac_sparsity)
res_2 = least_squares(
p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
method=self.method, jac_sparsity=jac_sparsity)
res_3 = least_squares(
p.fun, p.x0, jac, bounds=(p.lb, p.ub),
method=self.method, jac_sparsity=jac_sparsity)
assert_allclose(res_1.optimality, 0, atol=1e-10)
assert_allclose(res_2.optimality, 0, atol=1e-10)
assert_allclose(res_3.optimality, 0, atol=1e-10)
def test_wrong_jac_sparsity(self):
p = BroydenTridiagonal()
sparsity = p.sparsity[:-1]
assert_raises(ValueError, least_squares, p.fun, p.x0,
jac_sparsity=sparsity, method=self.method)
def test_linear_operator(self):
p = BroydenTridiagonal(mode='operator')
res = least_squares(p.fun, p.x0, p.jac, method=self.method)
assert_allclose(res.cost, 0.0, atol=1e-20)
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method=self.method, tr_solver='exact')
def test_x_scale_jac_scale(self):
p = BroydenTridiagonal()
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
x_scale='jac')
assert_allclose(res.cost, 0.0, atol=1e-20)
p = BroydenTridiagonal(mode='operator')
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method=self.method, x_scale='jac')
| SparseMixin |
python | dask__distributed | distributed/_async_taskgroup.py | {
"start": 1239,
"end": 4587
} | class ____(_LoopBoundMixin):
"""Collection tracking all currently running asynchronous tasks within a group"""
#: If True, the group is closed and does not allow adding new tasks.
closed: bool
def __init__(self) -> None:
self.closed = False
self._ongoing_tasks: set[asyncio.Task[None]] = set()
def call_soon(
self, afunc: Callable[P, Coro[None]], /, *args: P.args, **kwargs: P.kwargs
) -> None:
"""Schedule a coroutine function to be executed as an `asyncio.Task`.
The coroutine function `afunc` is scheduled with `args` arguments and `kwargs` keyword arguments
as an `asyncio.Task`.
Parameters
----------
afunc
Coroutine function to schedule.
*args
Arguments to be passed to `afunc`.
**kwargs
Keyword arguments to be passed to `afunc`
Returns
-------
None
Raises
------
AsyncTaskGroupClosedError
If the task group is closed.
"""
if self.closed: # Avoid creating a coroutine
raise AsyncTaskGroupClosedError(
"Cannot schedule a new coroutine function as the group is already closed."
)
task = self._get_loop().create_task(afunc(*args, **kwargs))
task.add_done_callback(self._ongoing_tasks.remove)
self._ongoing_tasks.add(task)
return None
def call_later(
self,
delay: float,
afunc: Callable[P, Coro[None]],
/,
*args: P.args,
**kwargs: P.kwargs,
) -> None:
"""Schedule a coroutine function to be executed after `delay` seconds as an `asyncio.Task`.
The coroutine function `afunc` is scheduled with `args` arguments and `kwargs` keyword arguments
as an `asyncio.Task` that is executed after `delay` seconds.
Parameters
----------
delay
Delay in seconds.
afunc
Coroutine function to schedule.
*args
Arguments to be passed to `afunc`.
**kwargs
Keyword arguments to be passed to `afunc`
Returns
-------
The None
Raises
------
AsyncTaskGroupClosedError
If the task group is closed.
"""
self.call_soon(_delayed(afunc, delay), *args, **kwargs)
def close(self) -> None:
"""Closes the task group so that no new tasks can be scheduled.
Existing tasks continue to run.
"""
self.closed = True
async def stop(self) -> None:
"""Close the group and stop all currently running tasks.
Closes the task group and cancels all tasks. All tasks are cancelled
an additional time for each time this task is cancelled.
"""
self.close()
current_task = asyncio.current_task(self._get_loop())
err = None
while tasks_to_stop := (self._ongoing_tasks - {current_task}):
for task in tasks_to_stop:
task.cancel()
try:
await asyncio.wait(tasks_to_stop)
except asyncio.CancelledError as e:
err = e
if err is not None:
raise err
def __len__(self):
return len(self._ongoing_tasks)
| AsyncTaskGroup |
python | walkccc__LeetCode | solutions/1344. Angle Between Hands of a Clock/1344.py | {
"start": 0,
"end": 224
} | class ____:
def angleClock(self, hour: int, minutes: int) -> float:
hourAngle = (hour % 12) * 30 + minutes * 0.5
minuteAngle = minutes * 6
ans = abs(hourAngle - minuteAngle)
return min(ans, 360 - ans)
| Solution |
python | optuna__optuna | tests/storages_tests/rdb_tests/test_models.py | {
"start": 1107,
"end": 2696
} | class ____:
@staticmethod
def _create_model(session: Session) -> StudyModel:
study = StudyModel(study_id=1, study_name="test-study")
dummy_study = StudyModel(study_id=2, study_name="dummy-study")
session.add(
StudyDirectionModel(
study_id=study.study_id, direction=StudyDirection.MINIMIZE, objective=0
)
)
session.add(
StudyDirectionModel(
study_id=dummy_study.study_id, direction=StudyDirection.MINIMIZE, objective=0
)
)
session.commit()
return study
@staticmethod
def test_where_study_id(session: Session) -> None:
study = TestStudyDirectionModel._create_model(session)
assert 1 == len(StudyDirectionModel.where_study_id(study.study_id, session))
assert 0 == len(StudyDirectionModel.where_study_id(-1, session))
@staticmethod
def test_cascade_delete_on_study(session: Session) -> None:
directions = [
StudyDirectionModel(study_id=1, direction=StudyDirection.MINIMIZE, objective=0),
StudyDirectionModel(study_id=1, direction=StudyDirection.MAXIMIZE, objective=1),
]
study = StudyModel(study_id=1, study_name="test-study", directions=directions)
session.add(study)
session.commit()
assert 2 == len(StudyDirectionModel.where_study_id(study.study_id, session))
session.delete(study)
session.commit()
assert 0 == len(StudyDirectionModel.where_study_id(study.study_id, session))
| TestStudyDirectionModel |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/control_flow_ops_py_test.py | {
"start": 183849,
"end": 187616
} | class ____(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
self.evaluate(variables.global_variables_initializer())
if static_unroll:
for _ in range(steps):
i, x = loop_body(i, x)
else:
i, x = while_loop_tf.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in range(3):
# exclude warm up time
self.evaluate(r)
start_time = time.time()
for _ in range(num_iters):
self.evaluate(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
| WhileOpBenchmark |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 85485,
"end": 86619
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[2, 3]"):
l_x_ = L_x_
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, l_x_); wrap_body_0 = l_x_ = None
a: "f32[2, 3]" = wrap[0]
b: "f32[2, 3]" = wrap[1]; wrap = None
add: "f32[2, 3]" = a + b; a = b = None
return (add,)
class wrap_body_0(torch.nn.Module):
def forward(self, l_x_: "f32[2, 3]"):
sin: "f32[2, 3]" = l_x_.sin()
cos: "f32[2, 3]" = l_x_.cos(); l_x_ = None
return (sin, cos)
""",
)
def test_output_with_dict(self):
def f(x):
return wrap(lambda x: [{"a": -x}], x)
x = torch.randn(3)
counters.clear()
arg_count = ifdynstaticdefault(2, 3)
graph = self._test_wrap_simple(
f, default_args_generator((x,)), arg_count, 2, return_graph=True
)
self.assertEqual(len(counters["graph_break"]), 0)
if check_dynamic_shape_capture():
return
self.assertExpectedInline(
graph,
"""\
| GraphModule |
python | django__django | tests/one_to_one/models.py | {
"start": 2876,
"end": 3045
} | class ____(models.Model):
is_temp = models.BooleanField(default=False)
school = models.OneToOneField(School, models.CASCADE)
objects = DirectorManager()
| Director |
python | openai__gym | tests/wrappers/test_step_compatibility.py | {
"start": 106,
"end": 410
} | class ____(gym.Env):
def __init__(self):
self.action_space = Discrete(2)
self.observation_space = Discrete(2)
def step(self, action):
obs = self.observation_space.sample()
rew = 0
done = False
info = {}
return obs, rew, done, info
| OldStepEnv |
python | keon__algorithms | algorithms/compression/huffman_coding.py | {
"start": 5477,
"end": 6280
} | class ____:
"""
Class to help find signs in tree
"""
def __init__(self, tree):
self.root = tree
self.current_node = tree
self.found = None
def find(self, bit):
"""
Find sign in tree
:param bit:
:return: True if sign is found
"""
if bit == "0":
self.current_node = self.current_node.left
elif bit == "1":
self.current_node = self.current_node.right
else:
self._reset()
return True
if self.current_node.sign is not None:
self._reset(self.current_node.sign)
return True
else:
return False
def _reset(self, found=""):
self.found = found
self.current_node = self.root
| TreeFinder |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/models.py | {
"start": 11460,
"end": 11794
} | class ____:
def __init__(self, models: AsyncModels) -> None:
self._models = models
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
models.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
models.list,
)
| AsyncModelsWithRawResponse |
python | getsentry__sentry | src/sentry/codecov/endpoints/test_results_aggregates/test_results_aggregates.py | {
"start": 921,
"end": 2544
} | class ____(CodecovEndpoint):
__test__ = False
owner = ApiOwner.CODECOV
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Retrieve aggregated test result metrics for repository, owner, and organization",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
PreventParams.OWNER,
PreventParams.REPOSITORY,
PreventParams.INTERVAL,
PreventParams.BRANCH,
],
request=None,
responses={
200: TestResultAggregatesSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, owner: RpcIntegration, repository: str, **kwargs) -> Response:
"""
Retrieves aggregated test result metrics for a given repository and owner.
Also accepts a query parameter to specify the time period for the metrics.
"""
owner_slug = owner.name
variables = {
"owner": owner_slug,
"repo": repository,
"branch": request.query_params.get("branch"),
"interval": request.query_params.get(
"interval", MeasurementInterval.INTERVAL_30_DAY.value
),
}
client = CodecovApiClient(git_provider_org=owner_slug)
graphql_response = client.query(query=query, variables=variables)
test_results = TestResultAggregatesSerializer().to_representation(graphql_response.json())
return Response(test_results)
| TestResultsAggregatesEndpoint |
python | pikepdf__pikepdf | src/pikepdf/models/metadata.py | {
"start": 9975,
"end": 33056
} | class ____(MutableMapping):
"""Read and edit the metadata associated with a PDF.
The PDF specification contain two types of metadata, the newer XMP
(Extensible Metadata Platform, XML-based) and older DocumentInformation
dictionary. The PDF 2.0 specification removes the DocumentInformation
dictionary.
This primarily works with XMP metadata, but includes methods to generate
XMP from DocumentInformation and will also coordinate updates to
DocumentInformation so that the two are kept consistent.
XMP metadata fields may be accessed using the full XML namespace URI or
the short name. For example ``metadata['dc:description']``
and ``metadata['{http://purl.org/dc/elements/1.1/}description']``
both refer to the same field. Several common XML namespaces are registered
automatically.
See the XMP specification for details of allowable fields.
To update metadata, use a with block.
Example:
>>> with pdf.open_metadata() as records:
... records['dc:title'] = 'New Title'
See Also:
:meth:`pikepdf.Pdf.open_metadata`
"""
DOCINFO_MAPPING: list[DocinfoMapping] = [
DocinfoMapping(XMP_NS_DC, 'creator', Name.Author, AuthorConverter),
DocinfoMapping(XMP_NS_DC, 'description', Name.Subject, None),
DocinfoMapping(XMP_NS_DC, 'title', Name.Title, None),
DocinfoMapping(XMP_NS_PDF, 'Keywords', Name.Keywords, None),
DocinfoMapping(XMP_NS_PDF, 'Producer', Name.Producer, None),
DocinfoMapping(XMP_NS_XMP, 'CreateDate', Name.CreationDate, DateConverter),
DocinfoMapping(XMP_NS_XMP, 'CreatorTool', Name.Creator, None),
DocinfoMapping(XMP_NS_XMP, 'ModifyDate', Name.ModDate, DateConverter),
]
NS: dict[str, str] = {prefix: uri for uri, prefix in DEFAULT_NAMESPACES}
REVERSE_NS: dict[str, str] = dict(DEFAULT_NAMESPACES)
_PARSERS_OVERWRITE_INVALID_XML: Iterable[Callable[[bytes], Any]] = [
_parser_basic,
_parser_strip_illegal_bytes,
_parser_recovery,
_parser_replace_with_empty_xmp,
]
_PARSERS_STANDARD: Iterable[Callable[[bytes], Any]] = [_parser_basic]
@classmethod
def register_xml_namespace(cls, uri, prefix):
"""Register a new XML/XMP namespace.
Arguments:
uri: The long form of the namespace.
prefix: The alias to use when interpreting XMP.
"""
cls.NS[prefix] = uri
cls.REVERSE_NS[uri] = prefix
etree.register_namespace(_prefix, _uri)
def __init__(
self,
pdf: Pdf,
pikepdf_mark: bool = True,
sync_docinfo: bool = True,
overwrite_invalid_xml: bool = True,
):
"""Construct PdfMetadata. Use Pdf.open_metadata() instead."""
self._pdf = pdf
self.mark = pikepdf_mark
self.sync_docinfo = sync_docinfo
self._updating = False
self.overwrite_invalid_xml = overwrite_invalid_xml
self._xmp = None
def load_from_docinfo(
self, docinfo, delete_missing: bool = False, raise_failure: bool = False
) -> None:
"""Populate the XMP metadata object with DocumentInfo.
Arguments:
docinfo: a DocumentInfo, e.g pdf.docinfo
delete_missing: if the entry is not DocumentInfo, delete the equivalent
from XMP
raise_failure: if True, raise any failure to convert docinfo;
otherwise warn and continue
A few entries in the deprecated DocumentInfo dictionary are considered
approximately equivalent to certain XMP records. This method copies
those entries into the XMP metadata.
"""
def warn_or_raise(msg, e=None):
if raise_failure:
raise ValueError(msg) from e
warn(msg)
for uri, shortkey, docinfo_name, converter in self.DOCINFO_MAPPING:
qname = QName(uri, shortkey)
# docinfo might be a dict or pikepdf.Dictionary, so lookup keys
# by str(Name)
val = docinfo.get(str(docinfo_name))
if val is None:
if delete_missing and qname in self:
del self[qname]
continue
try:
val = str(val)
if converter:
val = converter.xmp_from_docinfo(val)
if not val:
continue
self._setitem(qname, val, True)
except (ValueError, AttributeError, NotImplementedError) as e:
warn_or_raise(
f"The metadata field {docinfo_name} could not be copied to XMP", e
)
valid_docinfo_names = {
str(docinfo_name) for _, _, docinfo_name, _ in self.DOCINFO_MAPPING
}
extra_docinfo_names = {str(k) for k in docinfo.keys()} - valid_docinfo_names
for extra in extra_docinfo_names:
warn_or_raise(
f"The metadata field {extra} with value '{repr(docinfo.get(extra))}' "
"has no XMP equivalent, so it was discarded",
)
def _load(self) -> None:
try:
data = self._pdf.Root.Metadata.read_bytes()
except AttributeError:
data = b''
self._load_from(data)
def _load_from(self, data: bytes) -> None:
if data.strip() == b'':
data = XMP_EMPTY # on some platforms lxml chokes on empty documents
parsers = (
self._PARSERS_OVERWRITE_INVALID_XML
if self.overwrite_invalid_xml
else self._PARSERS_STANDARD
)
for parser in parsers:
try:
self._xmp = parser(data)
except (
XMLSyntaxError if self.overwrite_invalid_xml else NeverRaise # type: ignore
) as e:
if str(e).startswith("Start tag expected, '<' not found") or str(
e
).startswith("Document is empty"):
self._xmp = _parser_replace_with_empty_xmp()
break
else:
break
if self._xmp is not None:
try:
pis = self._xmp.xpath('/processing-instruction()')
for pi in pis:
etree.strip_tags(self._xmp, pi.tag)
self._get_rdf_root()
except (
Exception # pylint: disable=broad-except
if self.overwrite_invalid_xml
else NeverRaise
) as e:
log.warning("Error occurred parsing XMP", exc_info=e)
self._xmp = _parser_replace_with_empty_xmp()
else:
log.warning("Error occurred parsing XMP")
self._xmp = _parser_replace_with_empty_xmp()
@ensure_loaded
def __enter__(self):
"""Open metadata for editing."""
self._updating = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Close metadata and apply changes."""
try:
if exc_type is not None:
return
self._apply_changes()
finally:
self._updating = False
def _update_docinfo(self):
"""Update the PDF's DocumentInfo dictionary to match XMP metadata.
The standard mapping is described here:
https://www.pdfa.org/pdfa-metadata-xmp-rdf-dublin-core/
"""
# Touch object to ensure it exists
self._pdf.docinfo # pylint: disable=pointless-statement
for uri, element, docinfo_name, converter in self.DOCINFO_MAPPING:
qname = QName(uri, element)
try:
value = self[qname]
except KeyError:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name]
continue
if converter:
try:
value = converter.docinfo_from_xmp(value)
except ValueError:
warn(
f"The DocumentInfo field {docinfo_name} could not be "
"updated from XMP"
)
value = None
except Exception as e:
raise ValueError(
"An error occurred while updating DocumentInfo field "
f"{docinfo_name} from XMP {qname} with value {value}"
) from e
if value is None:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name]
continue
value = _clean(value)
try:
# Try to save pure ASCII
self._pdf.docinfo[docinfo_name] = value.encode('ascii')
except UnicodeEncodeError:
# qpdf will serialize this as a UTF-16 with BOM string
self._pdf.docinfo[docinfo_name] = value
def _get_xml_bytes(self, xpacket=True):
data = BytesIO()
if xpacket:
data.write(XPACKET_BEGIN)
self._xmp.write(data, encoding='utf-8', pretty_print=True)
if xpacket:
data.write(XPACKET_END)
data.seek(0)
xml_bytes = data.read()
return xml_bytes
def _apply_changes(self):
"""Serialize our changes back to the PDF in memory.
Depending how we are initialized, leave our metadata mark and producer.
"""
if self.mark:
# We were asked to mark the file as being edited by pikepdf
self._setitem(
QName(XMP_NS_XMP, 'MetadataDate'),
datetime.now(timezone.utc).isoformat(),
applying_mark=True,
)
self._setitem(
QName(XMP_NS_PDF, 'Producer'),
'pikepdf ' + pikepdf_version,
applying_mark=True,
)
xml = self._get_xml_bytes()
self._pdf.Root.Metadata = Stream(self._pdf, xml)
self._pdf.Root.Metadata[Name.Type] = Name.Metadata
self._pdf.Root.Metadata[Name.Subtype] = Name.XML
if self.sync_docinfo:
self._update_docinfo()
@classmethod
def _qname(cls, name: QName | str) -> str:
"""Convert name to an XML QName.
e.g. pdf:Producer -> {http://ns.adobe.com/pdf/1.3/}Producer
"""
if isinstance(name, QName):
return str(name)
if not isinstance(name, str):
raise TypeError(f"{name} must be str")
if name == '':
return name
if name.startswith('{'):
return name
try:
prefix, tag = name.split(':', maxsplit=1)
except ValueError:
# If missing the namespace, it belongs in the default namespace.
# A tag such <xyz xmlns="http://example.com"> defines a default
# namespace of http://example.com for all enclosed tags that don't
# override the namespace with a colon prefix.
# XMP does not usually use the default namespace, so we can
# assume it's just blank. In practice a document that depends on
# defining a default namespace over some part of its content
# could introduce a collision.
# See: https://www.w3.org/TR/REC-xml-names/#dt-defaultNS
prefix, tag = '', name
uri = cls.NS.get(prefix, None)
return str(QName(uri, tag))
def _prefix_from_uri(self, uriname):
"""Given a fully qualified XML name, find a prefix.
e.g. {http://ns.adobe.com/pdf/1.3/}Producer -> pdf:Producer
"""
uripart, tag = uriname.split('}', maxsplit=1)
uri = uripart.replace('{', '')
return self.REVERSE_NS[uri] + ':' + tag
def _get_subelements(self, node: _Element) -> Any:
"""Gather the sub-elements attached to a node.
Gather rdf:Bag and and rdf:Seq into set and list respectively. For
alternate languages values, take the first language only for
simplicity.
"""
items = node.find('rdf:Alt', self.NS)
if items is not None:
try:
return items[0].text
except IndexError:
return ''
for xmlcontainer, container, insertfn in XMP_CONTAINERS:
items = node.find(f'rdf:{xmlcontainer}', self.NS)
if items is None:
continue
result = container()
for item in items:
insertfn(result, item.text)
return result
return ''
def _get_rdf_root(self) -> _Element:
assert self._xmp is not None
rdf = self._xmp.find('.//rdf:RDF', self.NS)
if rdf is None:
rdf = self._xmp.getroot()
if not rdf.tag == '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF':
raise ValueError("Metadata seems to be XML but not XMP")
return rdf
def _get_elements(
self, name: str | QName = ''
) -> Iterator[tuple[_Element, str | bytes | None, Any, _Element]]:
"""Get elements from XMP.
Core routine to find elements matching name within the XMP and yield
them.
For XMP spec 7.9.2.2, rdf:Description with property attributes,
we yield the node which will have the desired as one of its attributes.
qname is returned so that the node.attrib can be used to locate the
source.
For XMP spec 7.5, simple valued XMP properties, we yield the node,
None, and the value. For structure or array valued properties we gather
the elements. We ignore qualifiers.
Args:
name: a prefixed name or QName to look for within the
data section of the XMP; looks for all data keys if omitted
Yields:
tuple: (node, qname_attrib, value, parent_node)
"""
qname = self._qname(name)
rdf = self._get_rdf_root()
for rdfdesc in rdf.findall('rdf:Description[@rdf:about=""]', self.NS):
if qname and qname in rdfdesc.keys():
yield (rdfdesc, qname, rdfdesc.get(qname), rdf)
elif not qname:
for k, v in rdfdesc.items():
if v:
yield (rdfdesc, k, v, rdf)
xpath = qname if name else '*'
for node in rdfdesc.findall(xpath, self.NS):
if node.text and node.text.strip():
yield (node, None, node.text, rdfdesc)
continue
values = self._get_subelements(node)
yield (node, None, values, rdfdesc)
def _get_element_values(self, name: str | QName = '') -> Iterator[Any]:
yield from (v[2] for v in self._get_elements(name))
@ensure_loaded
def __contains__(self, key: str | QName):
"""Test if XMP key is in metadata."""
return any(self._get_element_values(key))
@ensure_loaded
def __getitem__(self, key: str | QName):
"""Retrieve XMP metadata for key."""
try:
return next(self._get_element_values(key))
except StopIteration:
raise KeyError(key) from None
@ensure_loaded
def __iter__(self):
"""Iterate through XMP metadata attributes and nodes."""
for node, attrib, _val, _parents in self._get_elements():
if attrib:
yield attrib
else:
yield node.tag
@ensure_loaded
def __len__(self):
"""Return number of items in metadata."""
return len(list(iter(self)))
def _setitem(
self,
key: str | QName,
val: set[str] | list[str] | str,
applying_mark: bool = False,
):
if not self._updating:
raise RuntimeError("Metadata not opened for editing, use with block")
qkey = self._qname(key)
self._setitem_check_args(key, val, applying_mark, qkey)
try:
# Update existing node
self._setitem_update(key, val, qkey)
except StopIteration:
# Insert a new node
self._setitem_insert(key, val)
def _setitem_check_args(self, key, val, applying_mark: bool, qkey: str) -> None:
if (
self.mark
and not applying_mark
and qkey
in (
self._qname('xmp:MetadataDate'),
self._qname('pdf:Producer'),
)
):
# Complain if user writes self[pdf:Producer] = ... and because it will
# be overwritten on save, unless self._updating_mark, in which case
# the action was initiated internally
log.warning(
f"Update to {key} will be overwritten because metadata was opened "
"with set_pikepdf_as_editor=True"
)
if isinstance(val, str) and qkey in (self._qname('dc:creator')):
log.error(f"{key} should be set to a list of strings")
def _setitem_add_array(self, node, items: Iterable) -> None:
rdf_type = next(
c.rdf_type for c in XMP_CONTAINERS if isinstance(items, c.py_type)
)
seq = etree.SubElement(node, str(QName(XMP_NS_RDF, rdf_type)))
tag_attrib: dict[str, str] | None = None
if rdf_type == 'Alt':
tag_attrib = {str(QName(XMP_NS_XML, 'lang')): 'x-default'}
for item in items:
el = etree.SubElement(seq, str(QName(XMP_NS_RDF, 'li')), attrib=tag_attrib)
if item is not None:
inner_text: str | None = _clean(item)
if inner_text == '':
inner_text = None
el.text = inner_text
def _setitem_update(self, key, val, qkey):
# Locate existing node to replace
node, attrib, _oldval, _parent = next(self._get_elements(key))
if attrib:
if not isinstance(val, str):
if qkey == self._qname('dc:creator'):
# dc:creator incorrectly created as an attribute - we're
# replacing it anyway, so remove the old one
del node.attrib[qkey]
self._setitem_add_array(node, _clean(val))
else:
raise TypeError(f"Setting {key} to {val} with type {type(val)}")
else:
node.set(attrib, _clean(val))
elif isinstance(val, list | set):
for child in node.findall('*'):
node.remove(child)
self._setitem_add_array(node, val)
elif isinstance(val, str):
for child in node.findall('*'):
node.remove(child)
if str(self._qname(key)) in LANG_ALTS:
self._setitem_add_array(node, AltList([_clean(val)]))
else:
node.text = _clean(val)
else:
raise TypeError(f"Setting {key} to {val} with type {type(val)}")
def _setitem_insert(self, key, val):
rdf = self._get_rdf_root()
if str(self._qname(key)) in LANG_ALTS:
val = AltList([_clean(val)])
if isinstance(val, list | set):
rdfdesc = etree.SubElement(
rdf,
str(QName(XMP_NS_RDF, 'Description')),
attrib={str(QName(XMP_NS_RDF, 'about')): ''},
)
node = etree.SubElement(rdfdesc, self._qname(key))
self._setitem_add_array(node, val)
elif isinstance(val, str):
rdfdesc = rdf.find('rdf:Description[@rdf:about=""]', self.NS)
if rdfdesc is None:
rdfdesc = etree.SubElement(
rdf,
str(QName(XMP_NS_RDF, 'Description')),
attrib={str(QName(XMP_NS_RDF, 'about')): ''},
)
node = etree.SubElement(rdfdesc, self._qname(key))
node.text = _clean(val)
else:
raise TypeError(f"Setting {key} to {val} with type {type(val)}") from None
@ensure_loaded
def __setitem__(self, key: str | QName, val: set[str] | list[str] | str):
"""Set XMP metadata key to value."""
return self._setitem(key, val, False)
@ensure_loaded
def __delitem__(self, key: str | QName):
"""Delete item from XMP metadata."""
if not self._updating:
raise RuntimeError("Metadata not opened for editing, use with block")
try:
node, attrib, _oldval, parent = next(self._get_elements(key))
if attrib: # Inline
del node.attrib[attrib]
if (
len(node.attrib) == 1
and len(node) == 0
and QName(XMP_NS_RDF, 'about') in node.attrib.keys()
):
# The only thing left on this node is rdf:about="", so remove it
parent.remove(node)
else:
parent.remove(node)
except StopIteration:
raise KeyError(key) from None
@property
def pdfa_status(self) -> str:
"""Return the PDF/A conformance level claimed by this PDF, or False.
A PDF may claim to PDF/A compliant without this being true. Use an
independent verifier such as veraPDF to test if a PDF is truly
conformant.
Returns:
The conformance level of the PDF/A, or an empty string if the
PDF does not claim PDF/A conformance. Possible valid values
are: 1A, 1B, 2A, 2B, 2U, 3A, 3B, 3U. Note that ISO standard
typically refers to PDF/A-1b for example, using lower case;
this function returns the value as it appears in the PDF, which
is uppercase.
"""
# do same as @ensure_loaded - mypy can't handle decorated property
if not self._xmp:
self._load()
key_part = QName(XMP_NS_PDFA_ID, 'part')
key_conformance = QName(XMP_NS_PDFA_ID, 'conformance')
try:
return self[key_part] + self[key_conformance]
except KeyError:
return ''
@property
def pdfx_status(self) -> str:
"""Return the PDF/X conformance level claimed by this PDF, or False.
A PDF may claim to PDF/X compliant without this being true. Use an
independent verifier such as veraPDF to test if a PDF is truly
conformant.
Returns:
The conformance level of the PDF/X, or an empty string if the
PDF does not claim PDF/X conformance.
"""
# do same as @ensure_loaded - mypy can't handle decorated property
if not self._xmp:
self._load()
pdfx_version = QName(XMP_NS_PDFX_ID, 'GTS_PDFXVersion')
try:
return self[pdfx_version]
except KeyError:
return ''
@ensure_loaded
def __str__(self):
"""Convert XMP metadata to XML string."""
return self._get_xml_bytes(xpacket=False).decode('utf-8')
| PdfMetadata |
python | imageio__imageio | imageio/plugins/_tifffile.py | {
"start": 166973,
"end": 172051
} | class ____(object):
"""TIFF tag structure.
Attributes
----------
name : string
Name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF DATA_FORMATS.
count : int
Number of values.
value : various types
Tag data as Python object.
ImageSourceData : int
Location of value in file.
All attributes are read-only.
"""
__slots__ = ("code", "count", "dtype", "value", "valueoffset")
class Error(Exception):
pass
def __init__(self, parent, tagheader, **kwargs):
"""Initialize instance from tag header."""
fh = parent.filehandle
byteorder = parent.byteorder
unpack = struct.unpack
offsetsize = parent.offsetsize
self.valueoffset = fh.tell() + offsetsize + 4
code, type_ = unpack(parent.tagformat1, tagheader[:4])
count, value = unpack(parent.tagformat2, tagheader[4:])
try:
dtype = TIFF.DATA_FORMATS[type_]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % type_)
fmt = "%s%i%s" % (byteorder, count * int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > offsetsize or code in TIFF.TAG_READERS:
self.valueoffset = offset = unpack(parent.offsetformat, value)[0]
if offset < 8 or offset > fh.size - size:
raise TiffTag.Error("invalid tag value offset")
# if offset % 2:
# warnings.warn('tag value does not begin on word boundary')
fh.seek(offset)
if code in TIFF.TAG_READERS:
readfunc = TIFF.TAG_READERS[code]
value = readfunc(fh, byteorder, dtype, count, offsetsize)
elif type_ == 7 or (count > 1 and dtype[-1] == "B"):
value = read_bytes(fh, byteorder, dtype, count, offsetsize)
elif code in TIFF.TAGS or dtype[-1] == "s":
value = unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count, offsetsize)
elif dtype[-1] == "B" or type_ == 7:
value = value[:size]
else:
value = unpack(fmt, value[:size])
process = (
code not in TIFF.TAG_READERS and code not in TIFF.TAG_TUPLE and type_ != 7
)
if process and dtype[-1] == "s" and isinstance(value[0], bytes):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = value[0]
try:
value = bytes2str(stripascii(value).strip())
except UnicodeDecodeError:
warnings.warn("tag %i: coercing invalid ASCII to bytes" % code)
dtype = "1B"
else:
if code in TIFF.TAG_ENUM:
t = TIFF.TAG_ENUM[code]
try:
value = tuple(t(v) for v in value)
except ValueError as e:
warnings.warn(str(e))
if process:
if len(value) == 1:
value = value[0]
self.code = code
self.dtype = dtype
self.count = count
self.value = value
@property
def name(self):
return TIFF.TAGS.get(self.code, str(self.code))
def _fix_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag.
"""
if self.code == 258 and self.count == 2:
# TODO: test this case; need example file
warnings.warn("correcting LSM bitspersample tag")
tof = parent.offsetformat[parent.offsetsize]
self.valueoffset = struct.unpack(tof, self._value)[0]
parent.filehandle.seek(self.valueoffset)
self.value = struct.unpack("<HH", parent.filehandle.read(4))
def __str__(self, detail=0, width=79):
    """Return string containing information about tag.

    Parameters
    ----------
    detail : int
        Verbosity; <= 0 yields a single line truncated to *width*,
        larger values allow multi-line value output.
    width : int
        Maximum line width used for truncation and value formatting.
    """
    height = 1 if detail <= 0 else 8 * detail
    tcode = "%i%s" % (self.count * int(self.dtype[0]), self.dtype[1])
    # Bug fix: previously the [:width] slice applied to the argument tuple
    # (subscription binds tighter than %), so the header was never
    # truncated. Slice the formatted string instead.
    line = (
        "TiffTag %i %s %s @%i "
        % (self.code, self.name, tcode, self.valueoffset)
    )[:width]
    if self.code in TIFF.TAG_ENUM:
        if self.count == 1:
            value = TIFF.TAG_ENUM[self.code](self.value).name
        else:
            value = pformat(tuple(v.name for v in self.value))
    else:
        value = pformat(self.value, width=width, height=height)
    if detail <= 0:
        line += value
        line = line[:width]
    else:
        line += "\n" + value
    return line
# Added to produce cleaner exceptions if tifffile unexpectedly fails to open the
# file. See this comment (and the following) for details:
# https://github.com/imageio/imageio/commit/bdbe699bbcda4223b0b6bd4d7474f84bbe34af09#r64068747
| TiffTag |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/pipes/message_readers.py | {
"start": 8823,
"end": 11738
} | class ____(PipesLogReader):
def __init__(
    self,
    client=None,
    log_group: Optional[str] = None,
    log_stream: Optional[str] = None,
    target_stream: Optional[IO[str]] = None,
    start_time: Optional[int] = None,
    debug_info: Optional[str] = None,
    max_retries: Optional[int] = DEFAULT_CLOUDWATCH_LOGS_MAX_RETRIES,
):
    """Log reader that tails a CloudWatch log stream and forwards its lines.

    Falls back to a default boto3 logs client and to stdout when no
    client or target stream is supplied.
    """
    self.client = client or boto3.client("logs")
    self.target_stream = target_stream or sys.stdout
    self.log_group = log_group
    self.log_stream = log_stream
    self.start_time = start_time
    self.max_retries = max_retries
    self._debug_info = debug_info
    self.thread = None
@property
def debug_info(self) -> Optional[str]:
    """Human-readable identifier for this reader, if one was provided."""
    return self._debug_info
def target_is_readable(self, params: PipesParams) -> bool:
    """Return True if the targeted CloudWatch log stream exists.

    The log group/stream are taken from Pipes params, falling back to the
    values given at construction time. Returns False when either is unset,
    when the log group does not exist, or when no matching stream is found.
    """
    log_group = params.get("log_group") or self.log_group
    log_stream = params.get("log_stream") or self.log_stream
    # Guard clause instead of nested if/else; both must be known to probe.
    if log_group is None or log_stream is None:
        return False
    try:
        resp = self.client.describe_log_streams(
            logGroupName=log_group,
            logStreamNamePrefix=log_stream,
        )
    except self.client.exceptions.ResourceNotFoundException:
        # The log group itself does not exist (yet).
        return False
    # Idiom fix: return the truthiness directly rather than
    # `if ...: return True else: return False`.
    return bool(resp.get("logStreams", []))
def start(self, params: PipesParams, is_session_closed: Event) -> None:
    """Validate the target and launch the background tailing thread."""
    if not self.target_is_readable(params):
        raise DagsterInvariantViolationError(
            "log_group and log_stream must be set either in the constructor or in Pipes params."
        )
    reader_kwargs = {"params": params, "is_session_closed": is_session_closed}
    self.thread = Thread(target=self._start, kwargs=reader_kwargs)
    self.thread.start()
def _start(self, params: PipesParams, is_session_closed: Event) -> None:
    """Tail the CloudWatch stream until the Pipes session closes.

    Runs on the background thread created by ``start``.
    """
    # Pipes params override the constructor for group/stream, but
    # self.start_time (when set) takes precedence over params["start_time"].
    log_group = cast("str", params.get("log_group") or self.log_group)
    log_stream = cast("str", params.get("log_stream") or self.log_stream)
    start_time = cast("int", self.start_time or params.get("start_time"))
    for events in tail_cloudwatch_events(
        self.client, log_group, log_stream, start_time=start_time, max_retries=self.max_retries
    ):
        for event in events:
            # Each CloudWatch event may carry multiple lines in one message.
            for line in event.get("message", "").splitlines():
                if line:
                    forward_only_logs_to_file(line, self.target_stream)
        # NOTE(review): the closed flag is only checked between batches, so
        # the final batch is fully forwarded before the thread exits.
        if is_session_closed.is_set():
            return
def stop(self) -> None:
    """No-op: the reader thread returns on its own once the session closes."""
def is_running(self) -> bool:
    """Return True while the background reader thread exists and is alive."""
    thread = self.thread
    return thread is not None and thread.is_alive()
| PipesCloudWatchLogReader |
python | Delgan__loguru | loguru/_datetime.py | {
"start": 4688,
"end": 6136
} | class ____(datetime_): # noqa: N801
def __format__(self, fmt):
    """Delegate formatting to the module's compiled format function."""
    formatter = _compile_format(fmt)
    return formatter(self)
def _fallback_tzinfo(timestamp):
utc_naive = datetime_.fromtimestamp(timestamp, tz=timezone.utc).replace(tzinfo=None)
offset = datetime_.fromtimestamp(timestamp) - utc_naive
seconds = offset.total_seconds()
zone = strftime("%Z")
return timezone(timedelta(seconds=seconds), zone)
def _get_tzinfo(timestamp):
try:
local = localtime(timestamp)
except (OSError, OverflowError):
# The "localtime()" can overflow on some platforms when the timestamp is too large.
# Not sure the fallback won't also overflow, though.
return _fallback_tzinfo(timestamp)
try:
seconds = local.tm_gmtoff
zone = local.tm_zone
except AttributeError:
# The attributes were not availanble on all platforms before Python 3.6.
return _fallback_tzinfo(timestamp)
try:
return timezone(timedelta(seconds=seconds), zone)
except ValueError:
# The number of seconds returned by "tm_gmtoff" might be invalid on Windows (year 2038+).
# Curiously, the fallback workaround does not exhibit the same problem.
return _fallback_tzinfo(timestamp)
def aware_now():
    """Return the current local time as a timezone-aware custom `datetime`."""
    current = datetime_.now()
    tzinfo = _get_tzinfo(current.timestamp())
    aware_time = current.time().replace(tzinfo=tzinfo)
    return datetime.combine(current.date(), aware_time)
| datetime |
python | pytest-dev__pytest | src/_pytest/helpconfig.py | {
"start": 472,
"end": 10019
} | class ____(argparse.Action):
"""An argparse Action that will raise a PrintHelp exception in order to skip
the rest of the argument parsing when --help is passed.
This prevents argparse from raising UsageError when `--help` is used along
with missing required arguments when any are defined, for example by
``pytest_addoption``. This is similar to the way that the builtin argparse
--help option is implemented by raising SystemExit.
To opt in to this behavior, the parse caller must set
`namespace._raise_print_help = True`. Otherwise it just sets the option.
"""
def __init__(
    self, option_strings: Sequence[str], dest: str, *, help: str | None = None
) -> None:
    """Create a zero-argument, store-True action for the --help flag."""
    super().__init__(
        option_strings=option_strings,
        dest=dest,
        nargs=0,
        default=False,
        const=True,
        help=help,
    )
def __call__(
    self,
    parser: argparse.ArgumentParser,
    namespace: argparse.Namespace,
    values: str | Sequence[Any] | None,
    option_string: str | None = None,
) -> None:
    """Record the flag, then abort parsing early when the caller opted in."""
    setattr(namespace, self.dest, self.const)
    should_short_circuit = getattr(namespace, "_raise_print_help", False)
    if should_short_circuit:
        raise PrintHelp
def pytest_addoption(parser: Parser) -> None:
    """Register the debug/config related command line options."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "-V",
        "--version",
        action="count",
        default=0,
        dest="version",
        help="Display pytest version and information about plugins. "
        "When given twice, also display information about plugins.",
    )
    group._addoption(  # private to use reserved lower-case short option
        "-h",
        "--help",
        action=HelpAction,
        dest="help",
        help="Show help message and configuration info",
    )
    group._addoption(  # private to use reserved lower-case short option
        "-p",
        action="append",
        dest="plugins",
        default=[],
        metavar="name",
        help="Early-load given plugin module name or entry point (multi-allowed). "
        "To avoid loading of plugins, use the `no:` prefix, e.g. "
        "`no:doctest`. See also --disable-plugin-autoload.",
    )
    group.addoption(
        "--disable-plugin-autoload",
        action="store_true",
        default=False,
        help="Disable plugin auto-loading through entry point packaging metadata. "
        "Only plugins explicitly specified in -p or env var PYTEST_PLUGINS will be loaded.",
    )
    group.addoption(
        "--traceconfig",
        "--trace-config",
        action="store_true",
        default=False,
        help="Trace considerations of conftest.py files",
    )
    group.addoption(
        "--debug",
        action="store",
        nargs="?",
        const="pytestdebug.log",
        dest="debug",
        metavar="DEBUG_FILE_NAME",
        help="Store internal tracing debug information in this log file. "
        "This file is opened with 'w' and truncated as a result, care advised. "
        "Default: pytestdebug.log.",
    )
    group._addoption(  # private to use reserved lower-case short option
        "-o",
        "--override-ini",
        dest="override_ini",
        action="append",
        help='Override configuration option with "option=value" style, '
        "e.g. `-o strict_xfail=True -o cache_dir=cache`.",
    )
@pytest.hookimpl(wrapper=True)
def pytest_cmdline_parse() -> Generator[None, Config, Config]:
    """Wrapper hook: after parsing, honor --debug by enabling internal tracing.

    Opens the debug log, routes the plugin manager's trace output to it, and
    registers a cleanup that restores the writer and closes the file.
    """
    config = yield
    if config.option.debug:
        # --debug | --debug <file.log> was provided.
        path = config.option.debug
        debugfile = open(path, "w", encoding="utf-8")
        debugfile.write(
            "versions pytest-{}, "
            "python-{}\ninvocation_dir={}\ncwd={}\nargs={}\n\n".format(
                pytest.__version__,
                ".".join(map(str, sys.version_info)),
                config.invocation_params.dir,
                os.getcwd(),
                config.invocation_params.args,
            )
        )
        config.trace.root.setwriter(debugfile.write)
        undo_tracing = config.pluginmanager.enable_tracing()
        sys.stderr.write(f"writing pytest debug information to {path}\n")

        def unset_tracing() -> None:
            # Close the log and restore the no-op trace writer.
            debugfile.close()
            sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n")
            config.trace.root.setwriter(None)
            undo_tracing()

        config.add_cleanup(unset_tracing)
    return config
def show_version_verbose(config: Config) -> None:
    """Show verbose pytest version installation, including plugins."""
    sys.stdout.write(
        f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n"
    )
    # An empty plugin list simply produces no extra output.
    for line in getpluginversioninfo(config):
        sys.stdout.write(line + "\n")
def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
    """Handle verbose --version and --help before the main test loop."""
    # Note: a single `--version` argument is handled directly by
    # `Config.main()` to avoid starting up the entire pytest infrastructure
    # just to display the version (#13574).
    if config.option.version > 1:
        show_version_verbose(config)
        return ExitCode.OK
    if config.option.help:
        config._do_configure()
        showhelp(config)
        config._ensure_unconfigure()
        return ExitCode.OK
    return None
def showhelp(config: Config) -> None:
    """Write the full --help output: options, ini settings, env vars, hints."""
    import textwrap

    reporter: TerminalReporter | None = config.pluginmanager.get_plugin(
        "terminalreporter"
    )
    assert reporter is not None
    tw = reporter._tw
    tw.write(config._parser.optparser.format_help())
    tw.line()
    tw.line(
        "[pytest] configuration options in the first "
        "pytest.toml|pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:"
    )
    tw.line()

    columns = tw.fullwidth  # costly call
    indent_len = 24  # based on argparse's max_help_position=24
    indent = " " * indent_len
    for name in config._parser._inidict:
        help, type, _default = config._parser._inidict[name]
        if help is None:
            raise TypeError(f"help argument cannot be None for {name}")
        spec = f"{name} ({type}):"
        tw.write(f" {spec}")
        spec_len = len(spec)
        if spec_len > (indent_len - 3):
            # Display help starting at a new line.
            tw.line()
            helplines = textwrap.wrap(
                help,
                columns,
                initial_indent=indent,
                subsequent_indent=indent,
                break_on_hyphens=False,
            )
            for line in helplines:
                tw.line(line)
        else:
            # Display help starting after the spec, following lines indented.
            tw.write(" " * (indent_len - spec_len - 2))
            wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False)
            if wrapped:
                tw.line(wrapped[0])
                for line in wrapped[1:]:
                    tw.line(indent + line)

    tw.line()
    tw.line("Environment variables:")
    vars = [
        (
            "CI",
            "When set to a non-empty value, pytest knows it is running in a "
            "CI process and does not truncate summary info",
        ),
        ("BUILD_NUMBER", "Equivalent to CI"),
        ("PYTEST_ADDOPTS", "Extra command line options"),
        ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"),
        ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"),
        ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"),
        ("PYTEST_DEBUG_TEMPROOT", "Override the system temporary directory"),
        ("PYTEST_THEME", "The Pygments style to use for code output"),
        ("PYTEST_THEME_MODE", "Set the PYTEST_THEME to be either 'dark' or 'light'"),
    ]
    for name, help in vars:
        tw.line(f" {name:<24} {help}")
    tw.line()
    tw.line()

    tw.line("to see available markers type: pytest --markers")
    tw.line("to see available fixtures type: pytest --fixtures")
    tw.line(
        "(shown according to specified file_or_dir or current dir "
        "if not specified; fixtures with leading '_' are only shown "
        "with the '-v' option"
    )

    # Surface any collected warnings at the end, highlighted in red.
    for warningreport in reporter.stats.get("warnings", []):
        tw.line("warning : " + warningreport.message, red=True)
def getpluginversioninfo(config: Config) -> list[str]:
    """Return display lines describing installed third-party plugins."""
    plugininfo = config.pluginmanager.list_plugin_distinfo()
    if not plugininfo:
        return []
    lines = ["registered third-party plugins:"]
    for plugin, dist in plugininfo:
        location = getattr(plugin, "__file__", repr(plugin))
        lines.append(f" {dist.project_name}-{dist.version} at {location}")
    return lines
def pytest_report_header(config: Config) -> list[str]:
    """Add version/plugin details to the header under --debug/--traceconfig."""
    lines: list[str] = []
    if config.option.debug or config.option.traceconfig:
        lines.append(f"using: pytest-{pytest.__version__}")
        # Empty plugin info contributes nothing.
        lines.extend(getpluginversioninfo(config))

    if config.option.traceconfig:
        lines.append("active plugins:")
        for name, plugin in config.pluginmanager.list_name_plugin():
            location = plugin.__file__ if hasattr(plugin, "__file__") else repr(plugin)
            lines.append(f"  {name:<20}: {location}")
    return lines
| HelpAction |
python | neetcode-gh__leetcode | python/0005-longest-palindromic-substring.py | {
"start": 1,
"end": 594
} | class ____:
def longestPalindrome(self, s: str) -> str:
    """Return the longest palindromic substring of *s*.

    Uses expand-around-center over both odd and even centers.
    self.res / self.lenres keep tracking the best candidate, as before.
    """
    self.res = ""
    self.lenres = 0
    for i in range(len(s)):
        # Odd-length palindromes centered at i, even-length between i and i+1.
        for left, right in ((i, i), (i, i + 1)):
            while left >= 0 and right < len(s) and s[left] == s[right]:
                if (right - left + 1) > self.lenres:
                    self.res = s[left:right + 1]
                    self.lenres = right - left + 1
                left -= 1
                right += 1
    # Bug fix: previously returned the last helper() result (`s2`), which
    # only matched the overall best by accident; return the tracked best.
    return self.res
def helper(self, s, left, right):
    """Expand around a center; update and return the best palindrome so far."""
    n = len(s)
    while left >= 0 and right < n and s[left] == s[right]:
        size = right - left + 1
        if size > self.lenres:
            self.res = s[left:right + 1]
            self.lenres = size
        left -= 1
        right += 1
    return self.res
| Solution |
python | pytorch__pytorch | test/inductor/test_mkldnn_pattern_matcher.py | {
"start": 4445,
"end": 9571
} | class ____(TestCase):
def setUp(self):
    """Enable inductor freezing for the duration of each test."""
    super().setUp()
    stack = contextlib.ExitStack()
    stack.enter_context(config.patch({"freezing": True}))
    self.ctx_stack = stack
def tearDown(self):
    """Undo the freezing patch and run base-class teardown."""
    # Consistency fix: use super() like setUp() does instead of the explicit
    # TestCase.tearDown(self) call (same behavior, but respects the MRO).
    super().tearDown()
    self.ctx_stack.close()
def _check_unary_is_decomposed(self, unary_fn):
return not any(
isinstance(unary_fn, fn)
for fn in [torch.nn.ReLU, torch.nn.Sigmoid, torch.nn.Tanh]
)
def _clone_inputs(self, inputs):
def clone(x):
if not isinstance(x, torch.Tensor):
return x
return x.clone()
return tuple(clone(x) for x in inputs)
def _test_common(
    self,
    mod,
    inputs,
    matcher_check_fn,
    atol=1e-5,
    rtol=1.3e-6,
    check_autocast=torch.float32,
    check_quantization=False,
    is_qat=False,
    dtype=None,
    is_dynamic=False,
    quantizer=None,
    compile_options={},  # noqa: B006
    quantization_with_autocast=False,
):
    """Compile *mod*, compare against eager, and run the matcher checks.

    ``matcher_check_fn`` inspects the pattern-matcher counters populated
    during compilation. Under quantization only the counters are checked;
    otherwise compiled output is compared numerically to eager output.
    """
    # Pick the device: explicit self.device wins; otherwise infer xpu vs cpu
    # from the input tensors.
    if not hasattr(self, "device"):
        has_xpu = any(
            isinstance(input, torch.Tensor) and input.device.type == "xpu"
            for input in inputs
        )
        device = "xpu" if has_xpu else "cpu"
    else:
        device = self.device
    mod = mod.to(device=device)
    if device != "cpu":
        inputs = tuple(
            clone_preserve_strides_offset(x, device=device) for x in inputs
        )
    # Fresh counters and dynamo state so matcher_check_fn sees only this run.
    counters.clear()
    torch._dynamo.reset()
    # Widen tolerances under reduced-precision autocast.
    if check_autocast == torch.bfloat16 and is_mkldnn_bf16_supported(device):
        maybe_autocast = torch.amp.autocast(
            device_type=device, dtype=torch.bfloat16
        )
        atol, rtol = 1e-2, 1e-2
    elif check_autocast == torch.float16 and (is_mkldnn_fp16_supported(device)):
        maybe_autocast = torch.amp.autocast(device_type=device, dtype=torch.float16)
        atol, rtol = 1e-2, 1e-2
    else:
        assert check_autocast == torch.float32
        maybe_autocast = contextlib.nullcontext()
    if check_quantization:
        if quantization_with_autocast:
            with maybe_autocast:
                convert_model = _generate_qdq_quantized_model(
                    mod, inputs, is_qat, is_dynamic, quantizer
                )
        else:
            convert_model = _generate_qdq_quantized_model(
                mod, inputs, is_qat, is_dynamic, quantizer
            )
        with torch.no_grad(), maybe_autocast:
            _ = torch.compile(convert_model)(*inputs)
            matcher_check_fn()
    else:
        with torch.no_grad(), maybe_autocast:
            clone_inputs = self._clone_inputs(inputs)
            expected = mod(*inputs)
            actual = torch.compile(mod, **compile_options)(*clone_inputs)
            # self.precision, when non-zero, overrides the atol/rtol pair.
            if self.precision != 0:
                torch.testing.assert_close(
                    actual, expected, atol=self.precision, rtol=self.precision
                )
            else:
                torch.testing.assert_close(actual, expected, atol=atol, rtol=rtol)
            matcher_check_fn()
def _test_code_common(
    self,
    mod,
    inputs,
    include_ops,
    exclude_ops,
    atol=1e-5,
    rtol=1.3e-6,
    check_quantization=False,
    check_dynamic=None,
    num_include_ops=None,
    quantizer=None,
):
    """Compile *mod* and assert on the generated source code.

    Checks that every op name in ``include_ops`` appears (optionally with
    exact counts from ``num_include_ops``) and that none of ``exclude_ops``
    appear in the inductor-generated code.
    """
    with torch.no_grad():
        clone_inputs = self._clone_inputs(inputs)
        if check_quantization:
            mod = _generate_qdq_quantized_model(mod, inputs, quantizer=quantizer)
        expected = mod(*inputs)
        actual, (source_code,) = run_and_get_code(
            torch.compile(mod, fullgraph=True, dynamic=check_dynamic),
            *clone_inputs,
        )
        # Strip guard/assertion lines so op-count checks are not polluted.
        assert_keywords = ["assert_size_stride", "assert_alignment"]
        filtered_lines = [
            line
            for line in source_code.splitlines()
            if not any(assert_key in line for assert_key in assert_keywords)
        ]
        source_code = "\n".join(filtered_lines)
        for op in include_ops:
            self.assertIn(op, source_code)
        if num_include_ops is not None:
            assert len(include_ops) == len(num_include_ops)
            for i in range(len(include_ops)):
                self.assertEqual(
                    source_code.count(include_ops[i]), num_include_ops[i]
                )
        for op in exclude_ops:
            self.assertNotIn(op, source_code)
        if check_dynamic is not None:
            _check_has_dynamic_shape(self, source_code)
        if not check_quantization:
            # Skip due to reduce range setting for Quantization on preCI system.
            torch.testing.assert_close(actual, expected, atol=atol, rtol=rtol)
| TestPatternMatcherBase |
python | pola-rs__polars | py-polars/tests/unit/constructors/test_constructors.py | {
"start": 1924,
"end": 1996
} | class ____(NamedTuple):
# Fixture record: presumably one field per exercised dtype (temporal,
# float, string) -- TODO confirm against the constructor tests using it.
d: datetime
e: float
f: str
| _TestBazNT |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_init.py | {
"start": 49839,
"end": 55693
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
    """Number of ranks used by this multi-threaded FSDP test."""
    return 8
def _init_models(self):
    """Build a small Transformer, sync params from rank 0, return it with a copy."""
    torch.manual_seed(42)
    args = ModelArgs(n_layers=3, dropout_p=0.0)
    model = Transformer(args)
    # Broadcast so every rank starts from identical weights.
    for param in model.parameters():
        dist.broadcast(param.detach(), src=0)
    return model, copy.deepcopy(model)
@skip_if_lt_x_gpu(1)
def test_init_1d_transformer_shard_largest_dim(self):
    """Shard each param on its largest dim and check placements and values."""
    model, ref_model = self._init_models()

    def shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
        # Pick the dimension with the largest size for sharding.
        largest_dim = largest_dim_size = -1
        for dim, dim_size in enumerate(param.shape):
            if dim_size > largest_dim_size:
                largest_dim = dim
                largest_dim_size = dim_size
        assert largest_dim >= 0, f"{param.shape}"
        return Shard(largest_dim)

    for layer in model.layers:
        fully_shard(layer, shard_placement_fn=shard_placement_fn)
    fully_shard(model, shard_placement_fn=shard_placement_fn)

    # At least one parameter should end up sharded on dim 1.
    any_shard_dim1 = False
    for param in model.parameters():
        self.assertEqual(len(param.placements), 1)
        self.assertIsInstance(param.placements[0], Shard)
        any_shard_dim1 |= param.placements[0].dim == 1
    self.assertTrue(any_shard_dim1)
    # Reconstructed full params must match the unsharded reference.
    for param, ref_param in zip(model.parameters(), ref_model.parameters()):
        full_param = param.full_tensor()
        self.assertEqual(full_param, ref_param)
@skip_if_lt_x_gpu(1)
def test_init_1d_transformer_shard_dim_neg1(self):
    """Shard on dim -1 and verify values survive (dim gets normalized)."""
    model, ref_model = self._init_models()

    def shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
        # Check that FSDP will normalize this dim to non-negative
        return Shard(-1)

    for layer in model.layers:
        fully_shard(layer, shard_placement_fn=shard_placement_fn)
    fully_shard(model, shard_placement_fn=shard_placement_fn)
    for param, ref_param in zip(model.parameters(), ref_model.parameters()):
        full_param = param.full_tensor()
        self.assertEqual(full_param, ref_param)
@skip_if_lt_x_gpu(1)
def test_init_2d_transformer_shard_diff_dim(self):
    """2D (FSDP x TP) parallelism where FSDP shards a different dim than TP."""
    model, ref_model = self._init_models()
    dp_size, tp_size = self.world_size // 2, 2
    global_mesh = init_device_mesh(
        device_type.type, (dp_size, tp_size), mesh_dim_names=("dp", "tp")
    )
    model = Transformer.parallelize(model, global_mesh["tp"], use_seq_parallel=True)

    def shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
        # For TP-sharded DTensors, have FSDP shard the *other* dim
        # (ndim - 1 - tp_dim); everything else shards on dim 0.
        if isinstance(param, DTensor):
            for placement in param.placements:
                if isinstance(placement, Shard):
                    shard_dim = param.ndim - 1 - placement.dim
                    assert shard_dim >= 0, f"{param.shape}"
                    return Shard(shard_dim)
        return Shard(0)

    for layer in model.layers:
        fully_shard(
            layer, mesh=global_mesh["dp"], shard_placement_fn=shard_placement_fn
        )
    fully_shard(
        model, mesh=global_mesh["dp"], shard_placement_fn=shard_placement_fn
    )

    linear_weight_names = ["wq", "wk", "wv", "wo", "w1", "w2"]
    for param_name, param in model.named_parameters():
        if (
            any(n in param_name for n in linear_weight_names)
            and "weight" in param_name
        ):
            total_placement_dims = 0
            for placement in param.placements:
                self.assertTrue(isinstance(placement, Shard))
                total_placement_dims += placement.dim
            self.assertEqual(param.ndim, 2)
            # Check that FSDP shards on either dim-0 or dim-1, and TP
            # shards on the other
            self.assertEqual(total_placement_dims, 1)
        else:
            self.assertTrue(
                any(isinstance(placement, Shard) for placement in param.placements)
            )

    for param, ref_param in zip(model.parameters(), ref_model.parameters()):
        full_param = param.full_tensor()
        self.assertEqual(full_param, ref_param)
@skip_if_lt_x_gpu(1)
def test_init_1d_uneven_shard_largest_dim(self):
    """Uneven sharding on dim 1 must raise NotImplementedError."""
    torch.manual_seed(42)
    # 17 is not divisible by the world size, producing an uneven shard.
    model = nn.Sequential(nn.Linear(16, 17), nn.Linear(17, 8))

    def shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
        largest_dim = -1
        largest_dim_size = -1
        for dim, dim_size in enumerate(param.shape):
            if dim_size > largest_dim_size:
                largest_dim = dim
                largest_dim_size = dim_size
        assert largest_dim >= 0, f"{param.shape}"
        assert largest_dim < param.ndim, f"{largest_dim=} {param.shape}"
        return Shard(largest_dim)

    with self.assertRaisesRegex(
        NotImplementedError, "FSDP does not support uneven sharding on dim 1"
    ):
        fully_shard(model, shard_placement_fn=shard_placement_fn)
@skip_if_lt_x_gpu(1)
def test_invalid_shard_dim(self):
    """A shard dim out of range for 1D params must raise an assertion."""
    model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 8))

    def shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
        return Shard(1)

    # Shard(1) is invalid for 1D bias parameters
    with self.assertRaisesRegex(
        AssertionError, "Shard dim 1 is invalid for 1D tensor"
    ):
        fully_shard(model, shard_placement_fn=shard_placement_fn)
# TODO: Remove this test class once we remove the old import path:
# torch/distributed/_composable/fsdp
| TestFullyShardShardPlacementFn |
python | doocs__leetcode | solution/0800-0899/0897.Increasing Order Search Tree/Solution.py | {
"start": 192,
"end": 594
} | class ____:
def increasingBST(self, root: TreeNode) -> TreeNode:
    """Rearrange the tree in-order into a right-leaning chain of nodes."""

    def flatten(node):
        nonlocal tail
        if node is None:
            return
        flatten(node.left)
        node.left = None
        tail.right = node
        tail = node
        flatten(node.right)

    dummy = tail = TreeNode(right=root)
    flatten(root)
    return dummy.right
| Solution |
python | dask__distributed | distributed/http/prometheus.py | {
"start": 145,
"end": 618
} | class ____:
def __init__(self, server):
    """Keep a reference to *server* and read the configured metric namespace."""
    self.server = server
    self.subsystem = None
    self.namespace = dask.config.get("distributed.dashboard.prometheus.namespace")
def build_name(self, name):
    """Join namespace, subsystem and *name* with underscores, skipping unset parts."""
    parts = [part for part in (self.namespace, self.subsystem) if part]
    parts.append(name)
    return "_".join(parts)
| PrometheusCollector |
python | astropy__astropy | astropy/io/fits/column.py | {
"start": 18219,
"end": 54855
} | class ____(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(
    self,
    name=None,
    format=None,
    unit=None,
    null=None,
    bscale=None,
    bzero=None,
    disp=None,
    start=None,
    dim=None,
    array=None,
    ascii=None,
    coord_type=None,
    coord_unit=None,
    coord_ref_point=None,
    coord_ref_value=None,
    coord_inc=None,
    time_ref_pos=None,
):
    """
    Construct a `Column` by specifying attributes. All attributes
    except ``format`` can be optional; see :ref:`astropy:column_creation`
    and :ref:`astropy:creating_ascii_table` for more information regarding
    ``TFORM`` keyword.

    Parameters
    ----------
    name : str, optional
        column name, corresponding to ``TTYPE`` keyword

    format : str
        column format, corresponding to ``TFORM`` keyword

    unit : str, optional
        column unit, corresponding to ``TUNIT`` keyword

    null : str, optional
        null value, corresponding to ``TNULL`` keyword

    bscale : int-like, optional
        bscale value, corresponding to ``TSCAL`` keyword

    bzero : int-like, optional
        bzero value, corresponding to ``TZERO`` keyword

    disp : str, optional
        display format, corresponding to ``TDISP`` keyword

    start : int, optional
        column starting position (ASCII table only), corresponding
        to ``TBCOL`` keyword

    dim : str, optional
        column dimension corresponding to ``TDIM`` keyword

    array : iterable, optional
        a `list`, `numpy.ndarray` (or other iterable that can be used to
        initialize an ndarray) providing initial data for this column.
        The array will be automatically converted, if possible, to the data
        format of the column. In the case were non-trivial ``bscale``
        and/or ``bzero`` arguments are given, the values in the array must
        be the *physical* values--that is, the values of column as if the
        scaling has already been applied (the array stored on the column
        object will then be converted back to its storage values).

    ascii : bool, optional
        set `True` if this describes a column for an ASCII table; this
        may be required to disambiguate the column format

    coord_type : str, optional
        coordinate/axis type corresponding to ``TCTYP`` keyword

    coord_unit : str, optional
        coordinate/axis unit corresponding to ``TCUNI`` keyword

    coord_ref_point : int-like, optional
        pixel coordinate of the reference point corresponding to ``TCRPX``
        keyword

    coord_ref_value : int-like, optional
        coordinate value at reference point corresponding to ``TCRVL``
        keyword

    coord_inc : int-like, optional
        coordinate increment at reference point corresponding to ``TCDLT``
        keyword

    time_ref_pos : str, optional
        reference position for a time coordinate column corresponding to
        ``TRPOS`` keyword
    """
    if format is None:
        raise ValueError("Must specify format to construct Column.")

    # any of the input argument (except array) can be a Card or just
    # a number/string
    kwargs = {"ascii": ascii}
    for attr in KEYWORD_ATTRIBUTES:
        value = locals()[attr]  # get the argument's value
        if isinstance(value, Card):
            value = value.value
        kwargs[attr] = value

    # Validate all keyword values at once; collect every error message so
    # the user sees the full list in a single VerifyError.
    valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
    if invalid_kwargs:
        msg = ["The following keyword arguments to Column were invalid:"]
        for val in invalid_kwargs.values():
            msg.append(indent(val[1], 4 * " "))
        raise VerifyError("\n".join(msg))

    for attr in KEYWORD_ATTRIBUTES:
        setattr(self, attr, valid_kwargs.get(attr))

    # TODO: Try to eliminate the following two special cases
    # for recformat and dim:
    # This is not actually stored as an attribute on columns for some
    # reason
    recformat = valid_kwargs["recformat"]

    # The 'dim' keyword's original value is stored in self.dim, while
    # *only* the tuple form is stored in self._dims.
    self._dims = self.dim
    self.dim = dim

    # Awful hack to use for now to keep track of whether the column holds
    # pseudo-unsigned int data
    self._pseudo_unsigned_ints = False

    # if the column data is not ndarray, make it to be one, i.e.
    # input arrays can be just list or tuple, not required to be ndarray
    # does not include Object array because there is no guarantee
    # the elements in the object array are consistent.
    if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)):
        try:  # try to convert to a ndarray first
            if array is not None:
                array = np.array(array)
        except Exception:
            try:  # then try to convert it to a strings array
                itemsize = int(recformat[1:])
                array = chararray.array(array, itemsize=itemsize)
            except ValueError:
                # then try variable length array
                # Note: This includes _FormatQ by inheritance
                if isinstance(recformat, _FormatP):
                    array = _VLF(array, dtype=recformat.dtype)
                else:
                    raise ValueError(
                        f"Data is inconsistent with the format `{format}`."
                    )

    array = self._convert_to_valid_data_type(array)

    # We have required (through documentation) that arrays passed in to
    # this constructor are already in their physical values, so we make
    # note of that here
    if isinstance(array, np.ndarray):
        self._physical_values = True
    else:
        self._physical_values = False

    self._parent_fits_rec = None
    self.array = array
def __repr__(self):
    """Return 'attr = value; ...' for every keyword attribute that is set."""
    parts = [
        f"{attr} = {getattr(self, attr)!r}"
        for attr in KEYWORD_ATTRIBUTES
        if getattr(self, attr) is not None
    ]
    return "; ".join(parts)
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
    """
    The Numpy `~numpy.ndarray` associated with this `Column`.

    If the column was instantiated with an array passed to the ``array``
    argument, this will return that array. However, if the column is
    later added to a table, such as via `BinTableHDU.from_columns` as
    is typically the case, this attribute will be updated to reference
    the associated field in the table, which may no longer be the same
    array.
    """
    # Ideally the .array attribute never would have existed in the first
    # place, or would have been internal-only. This is a legacy of the
    # older design from Astropy that needs to have continued support, for
    # now.

    # One of the main problems with this design was that it created a
    # reference cycle. When the .array attribute was updated after
    # creating a FITS_rec from the column (as explained in the docstring) a
    # reference cycle was created. This is because the code in BinTableHDU
    # (and a few other places) does essentially the following:
    #
    #     data._coldefs = columns  # The ColDefs object holding this Column
    #     for col in columns:
    #         col.array = data.field(col.name)
    #
    # This way each columns .array attribute now points to the field in the
    # table data. It's actually a pretty confusing interface (since it
    # replaces the array originally pointed to by .array), but it's the way
    # things have been for a long, long time.
    #
    # However, this results, in *many* cases, in a reference cycle.
    # Because the array returned by data.field(col.name), while sometimes
    # an array that owns its own data, is usually like a slice of the
    # original data. It has the original FITS_rec as the array .base.
    # This results in the following reference cycle (for the n-th column):
    #
    #    data -> data._coldefs -> data._coldefs[n] ->
    #     data._coldefs[n].array -> data._coldefs[n].array.base -> data
    #
    # Because ndarray objects do not handled by Python's garbage collector
    # the reference cycle cannot be broken. Therefore the FITS_rec's
    # refcount never goes to zero, its __del__ is never called, and its
    # memory is never freed. This didn't occur in *all* cases, but it did
    # occur in many cases.
    #
    # To get around this, Column.array is no longer a simple attribute
    # like it was previously. Now each Column has a ._parent_fits_rec
    # attribute which is a weakref to a FITS_rec object. Code that
    # previously assigned each col.array to field in a FITS_rec (as in
    # the example a few paragraphs above) is still used, however now
    # array.setter checks if a reference cycle will be created. And if
    # so, instead of saving directly to the Column's __dict__, it creates
    # the ._prent_fits_rec weakref, and all lookups of the column's .array
    # go through that instead.
    #
    # This alone does not fully solve the problem. Because
    # _parent_fits_rec is a weakref, if the user ever holds a reference to
    # the Column, but deletes all references to the underlying FITS_rec,
    # the .array attribute would suddenly start returning None instead of
    # the array data. This problem is resolved on FITS_rec's end. See the
    # note in the FITS_rec._coldefs property for the rest of the story.

    # If the Columns's array is not a reference to an existing FITS_rec,
    # then it is just stored in self.__dict__; otherwise check the
    # _parent_fits_rec reference if it 's still available.
    if "array" in self.__dict__:
        return self.__dict__["array"]
    elif self._parent_fits_rec is not None:
        parent = self._parent_fits_rec()
        if parent is not None:
            return parent[self.name]
        # NOTE: a dead weakref falls through and returns None implicitly.
    else:
        return None
@array.setter
def array(self, array):
    # The following looks over the bases of the given array to check if it
    # has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
    # contains this Column itself, and would create a reference cycle if we
    # stored the array directly in self.__dict__.
    # In this case it instead sets up the _parent_fits_rec weakref to the
    # underlying FITS_rec, so that array.getter can return arrays through
    # self._parent_fits_rec().field(self.name), rather than storing a
    # hard reference to the field like it used to.
    base = array
    while True:
        if hasattr(base, "_coldefs") and isinstance(base._coldefs, ColDefs):
            for col in base._coldefs:
                if col is self and self._parent_fits_rec is None:
                    self._parent_fits_rec = weakref.ref(base)

                    # Just in case the user already set .array to their own
                    # array.
                    if "array" in self.__dict__:
                        del self.__dict__["array"]
                    return

        # Walk up the .base chain to find the root FITS_rec, if any.
        if getattr(base, "base", None) is not None:
            base = base.base
        else:
            break

    # No cycle detected: store the array directly on the instance.
    self.__dict__["array"] = array
@array.deleter
def array(self):
try:
del self.__dict__["array"]
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute("TTYPE")
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
"It is strongly recommended that column names contain only "
"upper and lower-case ASCII letters, digits, or underscores "
"for maximum compatibility with other software "
f"(got {name!r}).",
VerifyWarning,
)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if not isinstance(name, str) or len(str(Card("TTYPE", name))) != CARD_LENGTH:
raise AssertionError(
"Column name must be a string able to fit in a single "
"FITS card--typically this means a maximum of 68 "
"characters, though it may be fewer if the string "
"contains special characters like quotes."
)
@ColumnAttribute("TCTYP")
def coord_type(col, coord_type):
if coord_type is None:
return
if not isinstance(coord_type, str) or len(coord_type) > 8:
raise AssertionError(
"Coordinate/axis type must be a string of at most 8 characters."
)
@ColumnAttribute("TCUNI")
def coord_unit(col, coord_unit):
if coord_unit is not None and not isinstance(coord_unit, str):
raise AssertionError("Coordinate/axis unit must be a string.")
@ColumnAttribute("TCRPX")
def coord_ref_point(col, coord_ref_point):
if coord_ref_point is not None and not isinstance(
coord_ref_point, numbers.Real
):
raise AssertionError(
"Pixel coordinate of the reference point must be real floating type."
)
@ColumnAttribute("TCRVL")
def coord_ref_value(col, coord_ref_value):
if coord_ref_value is not None and not isinstance(
coord_ref_value, numbers.Real
):
raise AssertionError(
"Coordinate value at reference point must be real floating type."
)
@ColumnAttribute("TCDLT")
def coord_inc(col, coord_inc):
if coord_inc is not None and not isinstance(coord_inc, numbers.Real):
raise AssertionError("Coordinate increment must be real floating type.")
@ColumnAttribute("TRPOS")
def time_ref_pos(col, time_ref_pos):
if time_ref_pos is not None and not isinstance(time_ref_pos, str):
raise AssertionError("Time reference position must be a string.")
format = ColumnAttribute("TFORM")
unit = ColumnAttribute("TUNIT")
null = ColumnAttribute("TNULL")
bscale = ColumnAttribute("TSCAL")
bzero = ColumnAttribute("TZERO")
disp = ColumnAttribute("TDISP")
start = ColumnAttribute("TBCOL")
dim = ColumnAttribute("TDIM")
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format="I") # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f"Illegal format `{format}`.")
return format, recformat
@classmethod
def _verify_keywords(
cls,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Given the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f"Column format option (TFORMn) failed verification: {err!s} "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
except AttributeError as err:
msg = (
"Column format option (TFORMn) must be a string with a valid "
f"FITS table format (got {format!s}: {err!s}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
valid |= {
k: v
for k, v in [
("name", name),
("unit", unit),
("bscale", bscale),
("bzero", bzero),
]
if (v is not None and v != "")
}
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != "":
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
f"(got {null!r})."
)
else:
tnull_formats = ("B", "I", "J", "K")
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
"Column null option (TNULLn) must be an integer for "
f"binary table columns (got {null!r}). The invalid value "
"will be ignored for the purpose of formatting "
"the data in this column."
)
elif not (
format.format in tnull_formats
or (
format.format in ("P", "Q") and format.p_format in tnull_formats
)
):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
"Column null option (TNULLn) is invalid for binary "
f"table columns of type {format!r} (got {null!r}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
if msg is None:
valid["null"] = null
else:
invalid["null"] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != "":
msg = None
if not isinstance(disp, str):
msg = (
"Column disp option (TDISPn) must be a string (got "
f"{disp!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
elif isinstance(format, _AsciiColumnFormat) and disp[0].upper() == "L":
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column."
)
if msg is None:
try:
_parse_tdisp_format(disp)
valid["disp"] = disp
except VerifyError as err:
msg = (
"Column disp option (TDISPn) failed verification: "
f"{err!s} The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
invalid["disp"] = (disp, msg)
else:
invalid["disp"] = (disp, msg)
# Validate the start option
if start is not None and start != "":
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
"Column start option (TBCOLn) is not allowed for binary "
f"table columns (got {start!r}). The invalid keyword will be "
"ignored for the purpose of formatting the data in this "
"column."
)
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
"Column start option (TBCOLn) must be a positive integer "
f"(got {start!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
if msg is None:
valid["start"] = start
else:
invalid["start"] = (start, msg)
# Process TDIMn options
# ASCII table columns can't have a TDIMn keyword associated with it;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != "":
msg = None
dims_tuple = ()
# NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
"Column dim option (TDIMn) is not allowed for ASCII table "
f"columns (got {dim!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column."
)
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column."
)
if dims_tuple:
if isinstance(recformat, _FormatP):
# TDIMs have different meaning for VLA format,
# no warning should be thrown
msg = None
elif reduce(operator.mul, dims_tuple) > format.repeat:
msg = (
f"The repeat count of the column format {name!r} for column "
f"{format!r} is fewer than the number of elements per the TDIM "
f"argument {dim!r}. The invalid TDIMn value will be ignored "
"for the purpose of formatting this column."
)
if msg is None:
valid["dim"] = dims_tuple
else:
invalid["dim"] = (dim, msg)
if coord_type is not None and coord_type != "":
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
f"(got {coord_type!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column."
)
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
f"of at most 8 characters (got {coord_type!r}). The invalid "
"keyword will be ignored for the purpose of formatting this column."
)
if msg is None:
valid["coord_type"] = coord_type
else:
invalid["coord_type"] = (coord_type, msg)
if coord_unit is not None and coord_unit != "":
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
f"(got {coord_unit!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column."
)
if msg is None:
valid["coord_unit"] = coord_unit
else:
invalid["coord_unit"] = (coord_unit, msg)
for k, v in [
("coord_ref_point", coord_ref_point),
("coord_ref_value", coord_ref_value),
("coord_inc", coord_inc),
]:
if v is not None and v != "":
msg = None
if not isinstance(v, numbers.Real):
msg = (
f"Column {k} option ({ATTRIBUTE_TO_KEYWORD[k]}n) must be a "
f"real floating type (got {v!r}). The invalid value will be "
"ignored for the purpose of formatting the data in this column."
)
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != "":
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
f"a string (got {time_ref_pos!r}). The invalid keyword will be "
"ignored for the purpose of formatting this column."
)
if msg is None:
valid["time_ref_pos"] = time_ref_pos
else:
invalid["time_ref_pos"] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
where ambiguous) create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
# We're just give a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format, _AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
"Columns cannot have both a start (TCOLn) and dim "
"(TDIMn) option, since the former is only applies to "
"ASCII tables, and the latter is only valid for binary tables."
)
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
# For whatever reason our guess was wrong (for example if we got
# just 'F' that's not a valid binary format, but it an ASCII format
# code albeit with the width/precision omitted
guess_format = (
_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat
)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims and format.format not in "PQ":
shape = dims[:-1] if "A" in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if "P" in format or "Q" in format:
return array
elif "A" in format:
if array.dtype.char in "SU":
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif "L" in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype("bool"):
return np.where(array == np.False_, ord("F"), ord("T"))
else:
return np.where(array == 0, ord("F"), ord("T"))
elif "X" in format:
return _convert_array(array, np.dtype("uint8"))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
# to have explicit knowledge of whether they treated as
# pseudo-unsigned
bzeros = {
2: np.uint16(2**15),
4: np.uint32(2**31),
8: np.uint64(2**63),
}
if (
array.dtype.kind == "u"
and array.dtype.itemsize in bzeros
and self.bscale in (1, None, "")
and self.bzero == bzeros[array.dtype.itemsize]
):
# Basically the array is uint, has scale == 1.0, and the
# bzero is the appropriate value for a pseudo-unsigned
# integer of the input dtype, then go ahead and assume that
# uint is assumed
numpy_format = numpy_format.replace("i", "u")
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
| Column |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/relational/base_element.py | {
"start": 1980,
"end": 18348
} | class ____(NodeParser):
"""
Splits a document into Text Nodes and Index Nodes corresponding to embedded objects.
Supports text and tables currently.
"""
callback_manager: CallbackManager = Field(
default_factory=lambda: CallbackManager([]), exclude=True
)
llm: Optional[LLM] = Field(
default=None, description="LLM model to use for summarization."
)
summary_query_str: str = Field(
default=DEFAULT_SUMMARY_QUERY_STR,
description="Query string to use for summarization.",
)
num_workers: int = Field(
default=DEFAULT_NUM_WORKERS,
description="Num of workers for async jobs.",
)
show_progress: bool = Field(default=True, description="Whether to show progress.")
nested_node_parser: Optional[NodeParser] = Field(
default=None,
description="Other types of node parsers to handle some types of nodes.",
)
@classmethod
def class_name(cls) -> str:
return "BaseElementNodeParser"
@classmethod
def from_defaults(
cls,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> "BaseElementNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
callback_manager=callback_manager,
**kwargs,
)
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
async def _aparse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = await self.aget_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
@abstractmethod
def get_nodes_from_node(self, node: TextNode) -> List[BaseNode]:
"""Get nodes from node."""
...
async def aget_nodes_from_node(self, node: TextNode) -> List[BaseNode]:
"""Get nodes from node."""
return self.get_nodes_from_node(node)
@abstractmethod
def extract_elements(self, text: str, **kwargs: Any) -> List[Element]:
"""Extract elements from text."""
def get_table_elements(self, elements: List[Element]) -> List[Element]:
"""Get table elements."""
return [e for e in elements if e.type == "table" or e.type == "table_text"]
def get_text_elements(self, elements: List[Element]) -> List[Element]:
"""Get text elements."""
# TODO: There we should maybe do something with titles
# and other elements in the future?
return [e for e in elements if e.type != "table"]
def extract_table_summaries(self, elements: List[Element]) -> None:
"""Go through elements, extract out summaries that are tables."""
from llama_index.core.indices.list.base import SummaryIndex
from llama_index.core.settings import Settings
llm = self.llm or Settings.llm
table_context_list = []
for idx, element in tqdm(enumerate(elements)):
if element.type not in ("table", "table_text"):
continue
table_context = str(element.element)
if idx > 0 and str(elements[idx - 1].element).lower().strip().startswith(
"table"
):
table_context = str(elements[idx - 1].element) + "\n" + table_context
if idx < len(elements) + 1 and str(
elements[idx - 1].element
).lower().strip().startswith("table"):
table_context += "\n" + str(elements[idx + 1].element)
table_context_list.append(table_context)
async def _get_table_output(table_context: str, summary_query_str: str) -> Any:
index = SummaryIndex.from_documents(
[Document(text=table_context)],
)
query_engine = index.as_query_engine(llm=llm, output_cls=TableOutput)
try:
response = await query_engine.aquery(summary_query_str)
if isinstance(response, PydanticResponse):
return response.response
else:
raise ValueError(f"Expected PydanticResponse, got {type(response)}")
except (ValidationError, ValueError):
# There was a pydantic validation error, so we will run with text completion
# fill in the summary and leave other fields blank
query_engine = index.as_query_engine(llm=llm)
response_txt = await query_engine.aquery(summary_query_str)
return TableOutput(summary=str(response_txt), columns=[])
summary_jobs = [
_get_table_output(table_context, self.summary_query_str)
for table_context in table_context_list
]
summary_co = run_jobs(summary_jobs, workers=self.num_workers)
summary_outputs = asyncio_run(summary_co)
for element, summary_output in zip(elements, summary_outputs):
element.table_output = summary_output
async def aextract_table_summaries(self, elements: List[Element]) -> None:
"""Go through elements, extract out summaries that are tables."""
from llama_index.core.indices.list.base import SummaryIndex
from llama_index.core.settings import Settings
llm = self.llm or Settings.llm
table_context_list = []
if elements:
for idx, element in tqdm(enumerate(elements)):
if element.type not in ("table", "table_text"):
continue
table_context = str(element.element)
if idx > 0 and str(
elements[idx - 1].element
).lower().strip().startswith("table"):
table_context = (
str(elements[idx - 1].element) + "\n" + table_context
)
if idx < len(elements) + 1 and str(
elements[idx - 1].element
).lower().strip().startswith("table"):
table_context += "\n" + str(elements[idx + 1].element)
table_context_list.append(table_context)
async def _get_table_output(table_context: str, summary_query_str: str) -> Any:
index = SummaryIndex.from_documents(
[Document(text=table_context)],
)
query_engine = index.as_query_engine(llm=llm, output_cls=TableOutput)
try:
response = await query_engine.aquery(summary_query_str)
return cast(PydanticResponse, response).response
except (ValidationError, ValueError):
# There was a pydantic validation error, so we will run with text completion
# fill in the summary and leave other fields blank
query_engine = index.as_query_engine(llm=llm)
response_txt = await query_engine.aquery(summary_query_str)
return TableOutput(summary=str(response_txt), columns=[])
summary_jobs = [
_get_table_output(table_context, self.summary_query_str)
for table_context in table_context_list
]
summary_outputs = await run_jobs(summary_jobs, workers=self.num_workers)
for element, summary_output in zip(elements, summary_outputs):
element.table_output = summary_output
def get_base_nodes_and_mappings(
self, nodes: List[BaseNode]
) -> Tuple[List[BaseNode], Dict]:
"""
Get base nodes and mappings.
Given a list of nodes and IndexNode objects, return the base nodes and a mapping
from index id to child nodes (which are excluded from the base nodes).
"""
node_dict = {node.node_id: node for node in nodes}
node_mappings = {}
base_nodes = []
# first map index nodes to their child nodes
nonbase_node_ids = set()
for node in nodes:
if isinstance(node, IndexNode):
node_mappings[node.index_id] = node_dict[node.index_id]
nonbase_node_ids.add(node.index_id)
else:
pass
# then add all nodes that are not children of index nodes
for node in nodes:
if node.node_id not in nonbase_node_ids:
base_nodes.append(node)
return base_nodes, node_mappings
def get_nodes_and_objects(
self, nodes: List[BaseNode]
) -> Tuple[List[BaseNode], List[IndexNode]]:
base_nodes, node_mappings = self.get_base_nodes_and_mappings(nodes)
nodes = []
objects = []
for node in base_nodes:
if isinstance(node, IndexNode):
node.obj = node_mappings[node.index_id]
objects.append(node)
else:
nodes.append(node)
return nodes, objects
def _get_nodes_from_buffer(
self, buffer: List[str], node_parser: NodeParser
) -> List[BaseNode]:
"""Get nodes from buffer."""
doc = Document(text="\n\n".join(list(buffer)))
return node_parser.get_nodes_from_documents([doc])
def get_nodes_from_elements(
self,
elements: List[Element],
node_inherited: Optional[TextNode] = None,
ref_doc_text: Optional[str] = None,
) -> List[BaseNode]:
"""Get nodes and mappings."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is required for this function. Please install it with `pip install pandas`."
)
from llama_index.core.node_parser import SentenceSplitter
node_parser = self.nested_node_parser or SentenceSplitter()
nodes: List[BaseNode] = []
cur_text_el_buffer: List[str] = []
for element in elements:
if element.type == "table" or element.type == "table_text":
# flush text buffer for table
if len(cur_text_el_buffer) > 0:
cur_text_nodes = self._get_nodes_from_buffer(
cur_text_el_buffer, node_parser
)
nodes.extend(cur_text_nodes)
cur_text_el_buffer = []
table_output = cast(TableOutput, element.table_output)
table_md = ""
if element.type == "table":
table_df = cast(pd.DataFrame, element.table)
# We serialize the table as markdown as it allow better accuracy
# We do not use the table_df.to_markdown() method as it generate
# a table with a token hungry format.
table_md = "|"
for col_name, col in table_df.items():
table_md += f"{col_name}|"
table_md += "\n|"
for col_name, col in table_df.items():
table_md += f"---|"
table_md += "\n"
for row in table_df.itertuples():
table_md += "|"
for col in row[1:]:
table_md += f"{col}|"
table_md += "\n"
elif element.type == "table_text":
# if the table is non-perfect table, we still want to keep the original text of table
table_md = str(element.element)
col_schema = "\n\n".join([str(col) for col in table_output.columns])
# We build a summary of the table containing the extracted summary, and a description of the columns
table_summary = str(table_output.summary)
if table_output.table_title:
table_summary += ",\nwith the following table title:\n"
table_summary += str(table_output.table_title)
table_summary += ",\nwith the following columns:\n"
for col in table_output.columns:
table_summary += f"- {col.col_name}: {col.summary}\n"
# attempt to find start_char_idx for table
# raw table string regardless if perfect or not is stored in element.element
if ref_doc_text:
start_char_idx = ref_doc_text.find(str(element.element))
if start_char_idx >= 0:
end_char_idx = start_char_idx + len(str(element.element))
else:
start_char_idx = None # type: ignore
end_char_idx = None # type: ignore
else:
start_char_idx = None # type: ignore
end_char_idx = None # type: ignore
# shared index_id and node_id
node_id = str(uuid.uuid4())
index_node = IndexNode(
text=table_summary,
metadata={
"col_schema": col_schema,
},
excluded_embed_metadata_keys=["col_schema"],
index_id=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
)
table_str = table_summary + "\n" + table_md
text_node = TextNode(
id_=node_id,
text=table_str,
metadata={
# serialize the table as a dictionary string for dataframe of perfect table
"table_df": (
str(table_df.to_dict())
if element.type == "table"
else table_md
),
# add table summary for retrieval purposes
"table_summary": table_summary,
},
excluded_embed_metadata_keys=["table_df", "table_summary"],
excluded_llm_metadata_keys=["table_df", "table_summary"],
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
)
nodes.extend([index_node, text_node])
else:
cur_text_el_buffer.append(str(element.element))
# flush text buffer for the last batch
if len(cur_text_el_buffer) > 0:
cur_text_nodes = self._get_nodes_from_buffer(
cur_text_el_buffer, node_parser
)
nodes.extend(cur_text_nodes)
cur_text_el_buffer = []
# remove empty nodes and keep node original metadata inherited from parent nodes
for node in nodes:
if node_inherited and node_inherited.metadata:
node.metadata.update(node_inherited.metadata)
node.excluded_embed_metadata_keys = (
node_inherited.excluded_embed_metadata_keys
)
node.excluded_llm_metadata_keys = (
node_inherited.excluded_llm_metadata_keys
)
return [
node
for node in nodes
if len(node.get_content(metadata_mode=MetadataMode.NONE)) > 0
]
def __call__(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[BaseNode]:
nodes = self.get_nodes_from_documents(nodes, **kwargs) # type: ignore
nodes, objects = self.get_nodes_and_objects(nodes)
return nodes + objects # type: ignore
async def acall(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[BaseNode]:
nodes = await self.aget_nodes_from_documents(nodes, **kwargs) # type: ignore
nodes, objects = self.get_nodes_and_objects(nodes)
return nodes + objects # type: ignore
| BaseElementNodeParser |
python | numba__numba | numba/tests/test_ctypes.py | {
"start": 302,
"end": 1786
} | class ____(TestCase):
def _conversion_tests(self, check):
check(c_double, types.float64)
check(c_int, types.intc)
check(c_uint16, types.uint16)
check(c_size_t, types.size_t)
check(c_ssize_t, types.ssize_t)
check(c_void_p, types.voidptr)
check(POINTER(c_float), types.CPointer(types.float32))
check(POINTER(POINTER(c_float)),
types.CPointer(types.CPointer(types.float32)))
check(None, types.void)
def test_from_ctypes(self):
"""
Test converting a ctypes type to a Numba type.
"""
def check(cty, ty):
got = ctypes_utils.from_ctypes(cty)
self.assertEqual(got, ty)
self._conversion_tests(check)
# An unsupported type
with self.assertRaises(TypeError) as raises:
ctypes_utils.from_ctypes(c_wchar_p)
self.assertIn("Unsupported ctypes type", str(raises.exception))
def test_to_ctypes(self):
"""
Test converting a Numba type to a ctypes type.
"""
def check(cty, ty):
got = ctypes_utils.to_ctypes(ty)
self.assertEqual(got, cty)
self._conversion_tests(check)
# An unsupported type
with self.assertRaises(TypeError) as raises:
ctypes_utils.to_ctypes(types.ellipsis)
self.assertIn("Cannot convert Numba type '...' to ctypes type",
str(raises.exception))
| TestCTypesTypes |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_issue_metrics.py | {
"start": 242,
"end": 18688
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-issue-metrics"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.url = reverse(self.endpoint, args=(self.organization.slug,))
@freeze_time(datetime.now(tz=timezone.utc).replace(microsecond=100))
def test_get_errors(self) -> None:
project1 = self.create_project(teams=[self.team], slug="foo")
project2 = self.create_project(teams=[self.team], slug="bar")
one = self.create_release(project1, version="1.0.0")
two = self.create_release(project2, version="1.2.0")
curr = datetime.now(tz=timezone.utc)
before_curr = curr - timedelta(microseconds=100)
prev = before_curr - timedelta(hours=1)
after_prev = prev + timedelta(microseconds=100)
# Release issues.
self.create_group(
project=project1,
status=0,
first_seen=curr,
first_release=one,
type=1,
create_open_period=False,
)
self.create_group(
project=project1,
status=1,
first_seen=prev,
first_release=one,
type=2,
create_open_period=False,
)
self.create_group(
project=project2,
status=1,
first_seen=curr,
first_release=two,
type=3,
create_open_period=False,
)
self.create_group(
project=project2,
status=2,
first_seen=curr,
first_release=two,
type=4,
create_open_period=False,
)
self.create_group(
project=project2,
status=2,
first_seen=curr,
first_release=two,
type=FeedbackGroup.type_id,
)
# Time based issues.
self.create_group(
project=project1, status=0, first_seen=curr, type=1, create_open_period=False
)
self.create_group(
project=project1,
status=1,
first_seen=before_curr,
resolved_at=curr,
type=2,
create_open_period=False,
)
self.create_group(
project=project2,
status=1,
first_seen=prev,
resolved_at=after_prev,
type=3,
create_open_period=False,
)
self.create_group(
project=project2, status=2, first_seen=prev, type=4, create_open_period=False
)
self.create_group(project=project2, status=2, first_seen=prev, type=FeedbackGroup.type_id)
response = self.client.get(
self.url + f"?start={prev.isoformat()[:-6]}&end={curr.isoformat()[:-6]}&category=issue"
)
response_json = response.json()
assert response_json["timeseries"] == [
{
"axis": "new_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 3},
{"timestamp": int(curr.timestamp()), "value": 5},
],
},
{
"axis": "resolved_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 1},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.0.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 1},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.2.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 1,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 2},
],
},
]
def test_get_issues_by_project(self) -> None:
"""Assert the project filter works."""
project1 = self.create_project(teams=[self.team], slug="foo")
project2 = self.create_project(teams=[self.team], slug="bar")
curr = datetime.now(tz=timezone.utc)
prev = curr - timedelta(hours=1)
self.create_group(project=project1, status=0, first_seen=curr, type=1)
self.create_group(project=project2, status=0, first_seen=curr, type=1)
response = self.client.get(
self.url
+ f"?start={prev.isoformat()[:-6]}&end={curr.isoformat()[:-6]}&category=issue&project={project1.id}"
)
response_json = response.json()
assert response_json["timeseries"] == [
{
"axis": "new_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "resolved_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 0},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 0},
],
},
]
@freeze_time(datetime.now(tz=timezone.utc).replace(microsecond=100))
def test_get_feedback(self) -> None:
project1 = self.create_project(teams=[self.team], slug="foo")
project2 = self.create_project(teams=[self.team], slug="bar")
curr = datetime.now(tz=timezone.utc)
before_curr = curr - timedelta(microseconds=100)
prev = before_curr - timedelta(hours=1)
after_prev = prev + timedelta(microseconds=100)
# New cohort
self.create_group(project=project1, status=0, first_seen=curr, type=1)
self.create_group(
project=project1, status=1, first_seen=curr, type=2, create_open_period=False
)
self.create_group(
project=project2, status=1, first_seen=curr, type=3, create_open_period=False
)
self.create_group(project=project2, status=2, first_seen=prev, type=FeedbackGroup.type_id)
self.create_group(project=project2, status=2, first_seen=curr, type=FeedbackGroup.type_id)
# Resolved cohort
self.create_group(
project=project1,
status=0,
first_seen=before_curr,
resolved_at=curr,
type=2,
create_open_period=False,
)
self.create_group(
project=project1,
status=1,
first_seen=before_curr,
resolved_at=curr,
type=3,
create_open_period=False,
)
self.create_group(
project=project2,
status=1,
first_seen=prev,
resolved_at=after_prev,
type=FeedbackGroup.type_id,
create_open_period=False,
)
self.create_group(
project=project2,
status=1,
first_seen=before_curr,
resolved_at=curr,
type=FeedbackGroup.type_id,
create_open_period=False,
)
self.create_group(
project=project2,
status=2,
first_seen=before_curr,
resolved_at=curr,
type=5,
create_open_period=False,
)
response = self.client.get(
self.url
+ f"?start={prev.isoformat()[:-6]}&end={curr.isoformat()[:-6]}&category=feedback"
)
response_json = response.json()
assert response_json["timeseries"] == [
{
"axis": "new_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 2},
{"timestamp": int(curr.timestamp()), "value": 2},
],
},
{
"axis": "resolved_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 1},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 0},
],
},
]
def test_get_too_much_granularity(self) -> None:
response = self.client.get(self.url + "?statsPeriod=14d&interval=1001")
assert response.status_code == 400
assert response.json() == {
"detail": "The specified granularity is too precise. Increase your interval."
}
def test_get_invalid_interval(self) -> None:
response = self.client.get(self.url + "?interval=foo")
assert response.status_code == 400
assert response.json() == {"detail": "Could not parse interval value."}
def test_get_zero_interval(self) -> None:
response = self.client.get(self.url + "?interval=0")
assert response.status_code == 400
assert response.json() == {"detail": "Interval must be greater than 1000 milliseconds."}
def test_get_invalid_category(self) -> None:
response = self.client.get(self.url + "?category=foo")
assert response.status_code == 400
assert response.json() == {
"detail": "Invalid issue category. Valid options are 'issue' and 'feedback'."
}
def test_other_grouping(self) -> None:
project1 = self.create_project(teams=[self.team], slug="foo")
project2 = self.create_project(teams=[self.team], slug="bar")
one = self.create_release(project1, version="1.0.0")
two = self.create_release(project2, version="1.1.0")
three = self.create_release(project2, version="1.2.0")
four = self.create_release(project2, version="1.3.0")
fifth = self.create_release(project2, version="1.4.0")
sixth = self.create_release(project2, version="1.5.0")
curr = datetime.now(tz=timezone.utc)
prev = curr - timedelta(hours=1)
# Release issues.
self.create_group(project=project1, status=0, first_seen=curr, first_release=one, type=1)
self.create_group(project=project1, status=0, first_seen=curr, first_release=two, type=1)
self.create_group(project=project1, status=0, first_seen=curr, first_release=three, type=1)
self.create_group(project=project1, status=0, first_seen=curr, first_release=four, type=1)
self.create_group(project=project1, status=0, first_seen=curr, first_release=fifth, type=1)
self.create_group(project=project1, status=0, first_seen=curr, first_release=sixth, type=1)
response = self.client.get(
self.url + f"?start={prev.isoformat()[:-6]}&end={curr.isoformat()[:-6]}&category=issue"
)
response_json = response.json()
assert response_json["timeseries"] == [
{
"axis": "new_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 6},
],
},
{
"axis": "resolved_issues_count",
"groupBy": [],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 0},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.1.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.2.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 1,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.3.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 2,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.4.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 3,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["1.5.0"],
"meta": {
"interval": 3600000,
"isOther": False,
"order": 4,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
{
"axis": "new_issues_count_by_release",
"groupBy": ["other"],
"meta": {
"interval": 3600000,
"isOther": True,
"order": 5,
"valueType": "integer",
"valueUnit": None,
},
"values": [
{"timestamp": int(prev.timestamp()), "value": 0},
{"timestamp": int(curr.timestamp()), "value": 1},
],
},
]
| OrganizationIssueMetricsTestCase |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 643406,
"end": 643717
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(Submodule, graphql_name="node")
| SubmoduleEdge |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 13338,
"end": 13728
} | class ____(GroupType):
type_id = 1009
slug = "performance_consecutive_http"
description = "Consecutive HTTP"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.HTTP_CLIENT.value
noise_config = NoiseConfig(ignore_limit=5)
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| PerformanceConsecutiveHTTPQueriesGroupType |
python | tensorflow__tensorflow | tensorflow/python/distribute/ps_values.py | {
"start": 17481,
"end": 22081
} | class ____(resource_variable_ops.BaseResourceVariable):
"""A wrapper around unsynced variables created on workers.
`PerWorkerVariable`s are variables that are stored on workers and not
synchronized. A `PerWorkerVariable` is really a wrapper around multiple
independent `Variable`s stored on independent worker machines.
`PerWorkerVariable` is currently only tested and supported when used with
`ParameterServerStrategy`. A `PerWorkerVariable` can be created by creating a
`Variable` within strategy scope and using the `per_worker_variable` flag,
e.g.:
```
with strategy.scope():
var = tf.Variable(initial_value=0.0, per_worker_variable=True)
```
The implementation modifies the graph to ensure that a worker's local version
of the variable is used for computation at call time, while needing only one
function trace and requiring no code changes beyond the `per_worker_variable`
flag. `PerWorkerVariable`s can thus be treated like a standard `Variable`, but
support is experimental and not all ops have been tested.
All per-worker values can be retrieved and read into a list via
`PerWorkerVariable.read_all()`.
Caveats:
- `PerWorkerVariable`s should not be used as direct inputs to a
`tf.function`. That is, they should not appear in a tf.function header as
an input argument. However they can still be read and manipulated in a
`tf.function`.
- The `shape` argument must be fully-defined (no `None` entries) or left
empty. Partially-defined shapes are not yet supported.
- Automatic control dependencies do not work with `PerWorkerVariable`s, so
returning a `PerWorkerVariable` is not supported, and `read_all()` should
be used to retrieve values. (TODO: b/286052052)
- `PerWorkerVariable`s should not be created within a `tf.function`.
"""
def __init__(self, strategy, next_creator, **kwargs):
self._coordinator = strategy._cluster_coordinator
self._per_worker_vars = None
self._var_creator = functools.partial(next_creator, **kwargs)
self._coordinator_instance = next_creator(**kwargs)
# Set ResourceVariable attributes based on kwargs
if kwargs.get("in_graph_mode") is None:
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
else:
self._in_graph_mode = kwargs["in_graph_mode"]
self._cached_value = None
self._shape = self._coordinator_instance.shape
self._dtype = self._coordinator_instance.dtype
self._trainable = False # not supported
self._unique_id = kwargs.get("unique_id")
if kwargs.get("handle_name") is None:
self._handle_name = "Variable:0"
else:
self._handle_name = kwargs["handle_name"] + ":0"
self._validate_shape = kwargs.get("validate_shape", True)
@classmethod
def _variable_call(cls, *args, **kwargs):
"""Override to be a no-op to avoid metaclass creating ResourceVariables."""
return None
@property
def handle(self):
if context.executing_eagerly() or save_context.in_save_context():
return self._coordinator_instance.handle
else:
self._maybe_create_per_worker_vars()
closure, spec = self.handle_call_time_value()
return ops.get_default_graph().capture_call_time_value(
closure,
spec)
def handle_call_time_value(self):
"""Returns a closure to run for a handle at call time and its spec.
This function is called in self.handle to create a placeholder
which returns a handle on some worker or on the coordinator.
"""
def closure():
dispatch_context = coordinator_context.get_current_dispatch_context()
if dispatch_context:
remote_value = self._per_worker_vars._values[ # pylint: disable=protected-access
dispatch_context.worker_index]
ret = dispatch_context.maybe_get_remote_value(remote_value)
return ret.handle
else:
# Only needed for tracing
return self._coordinator_instance.handle
return closure, PerWorkerVariableSpec(
value=self._coordinator_instance.handle)
def _maybe_create_per_worker_vars(self):
"""Create variable on each worker if it hasn't been created."""
if not self._per_worker_vars:
self._per_worker_vars = (
self._coordinator._create_per_worker_variables(self._var_creator)) # pylint: disable=protected-access
def read_all(self):
"""Synchronously read variables from all workers into a list of Tensors."""
return [wv.get() for wv in self._per_worker_vars._values] # pylint: disable=protected-access
| PerWorkerVariable |
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py | {
"start": 5036,
"end": 6421
} | class ____(BertAttention):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__(config, is_causal, layer_idx, is_cross_attention)
del self.LayerNorm
self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
intermediate = self.self_attn_layer_norm(hidden_states)
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
intermediate,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
| XLMRobertaXLAttention |
python | python-poetry__poetry | src/poetry/utils/authenticator.py | {
"start": 1093,
"end": 1980
} | class ____:
cert: Path | None = dataclasses.field(default=None)
client_cert: Path | None = dataclasses.field(default=None)
verify: bool = dataclasses.field(default=True)
@classmethod
def create(
cls, repository: str, config: Config | None
) -> RepositoryCertificateConfig:
config = config if config else Config.create()
verify: str | bool = config.get(
f"certificates.{repository}.verify",
config.get(f"certificates.{repository}.cert", True),
)
client_cert: str = config.get(f"certificates.{repository}.client-cert")
return cls(
cert=Path(verify) if isinstance(verify, str) else None,
client_cert=Path(client_cert) if client_cert else None,
verify=verify if isinstance(verify, bool) else True,
)
@dataclasses.dataclass
| RepositoryCertificateConfig |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 335,
"end": 678
} | class ____(NonStrictDataModel):
""" """
_schema = {
"metric": {"description": "The metric name", "type": "string"},
"type": "object",
"variants": {
"description": "The names of the metric variants",
"items": {"type": "string"},
"type": "array",
},
}
| MetricVariants |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar9.py | {
"start": 2079,
"end": 2234
} | class ____(Generic[AnyStr]):
# This should generate an error because AnyStr can go unsolved.
def __init__(self, *, mode: AnyStr = ...) -> None: ...
| B |
python | justquick__django-activity-stream | actstream/templatetags/activity_tags.py | {
"start": 3861,
"end": 8314
} | class ____(AsNode):
args_count = 3
def render_result(self, context):
user = self.args[0].resolve(context)
actor = self.args[1].resolve(context)
flag = self.args[2].resolve(context)
return Follow.objects.is_following(user, actor, flag=flag)
def is_following_tag(parser, token):
"""
Returns true if the given user is following the actor marked by a flag, such as 'liking', 'watching' etc..
You can also save the returned value to a template variable by as syntax.
If you don't want to specify a flag, pass an empty string or use `is_following` template filter.
::
{% is_following user group "liking" %}
{% is_following user group "liking" as is_liking %}
{% is_following user group "" as is_following %}
"""
return IsFollowing.handle_token(parser, token)
def follow_url(parser, token):
"""
Renders the URL of the follow view for a particular actor instance
::
<a href="{% follow_url other_user %}">
{% if request.user|is_following:other_user %}
stop following
{% else %}
follow
{% endif %}
</a>
<a href="{% follow_url other_user 'watching' %}">
{% is_following user group "watching" as is_watching %}
{% if is_watching %}
stop watching
{% else %}
watch
{% endif %}
</a>
"""
bits = token.split_contents()
if len(bits) > 3:
raise TemplateSyntaxError("Accepted format {% follow_url [instance] %} or {% follow_url [instance] [flag] %}")
elif len(bits) == 2:
return DisplayActivityFollowUrl(bits[1])
else:
flag = bits[2][1:-1]
return DisplayActivityFollowUrl(bits[1], flag=flag)
def follow_all_url(parser, token):
"""
Renders the URL to follow an object as both actor and target
::
<a href="{% follow_all_url other_user %}">
{% if request.user|is_following:other_user %}
stop following
{% else %}
follow
{% endif %}
</a>
<a href="{% follow_all_url other_user 'watching' %}">
{% is_following user group "watching" as is_watching %}
{% if is_watching %}
stop watching
{% else %}
watch
{% endif %}
</a>
"""
bits = token.split_contents()
if len(bits) > 3:
raise TemplateSyntaxError(
"Accepted format {% follow_all_url [instance] %} or {% follow_url [instance] [flag] %}"
)
elif len(bits) == 2:
return DisplayActivityFollowUrl(bits[1], actor_only=False)
else:
flag = bits[2][1:-1]
return DisplayActivityFollowUrl(bits[1], actor_only=False, flag=flag)
def actor_url(parser, token):
"""
Renders the URL for a particular actor instance
::
<a href="{% actor_url request.user %}">View your actions</a>
<a href="{% actor_url another_user %}">{{ another_user }}'s actions</a>
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("Accepted format "
"{% actor_url [actor_instance] %}")
else:
return DisplayActivityActorUrl(*bits[1:])
def activity_stream(context, stream_type, *args, **kwargs):
"""
Renders an activity stream as a list into the template's context.
Streams loaded by stream_type can be the default ones (eg user, actor, etc.) or a user defined stream.
Extra args/kwargs are passed into the stream call.
::
{% activity_stream 'actor' user %}
{% for action in stream %}
{% display_action action %}
{% endfor %}
"""
if stream_type == 'model':
stream_type = 'model_actions'
if not hasattr(Action.objects, stream_type):
raise TemplateSyntaxError('Action manager has no attribute: %s' % stream_type)
ctxvar = kwargs.pop('as', 'stream')
context[ctxvar] = getattr(Action.objects, stream_type)(*args, **kwargs)
return ''
register.filter(activity_stream)
register.filter(is_following)
register.tag(name='is_following', compile_function=is_following_tag)
register.tag(display_action)
register.tag(follow_url)
register.tag(follow_all_url)
register.tag(actor_url)
register.simple_tag(takes_context=True)(activity_stream)
| IsFollowing |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 39522,
"end": 44186
} | class ____(StationaryKernelMixin, GenericKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
.. math::
k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2
Adding a constant kernel is equivalent to adding a constant::
kernel = RBF() + ConstantKernel(constant_value=2)
is the same as::
kernel = RBF() + 2
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default=1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on `constant_value`.
If set to "fixed", `constant_value` cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = RBF() + ConstantKernel(constant_value=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3696
>>> gpr.predict(X[:1,:], return_std=True)
(array([606.1]), array([0.248]))
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter("constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.full(
(_num_samples(X), _num_samples(Y)),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
)
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (
K,
np.full(
(_num_samples(X), _num_samples(X), 1),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
),
)
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(
_num_samples(X),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
)
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
| ConstantKernel |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 25104,
"end": 29948
} | class ____(ModelOutput):
r"""
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`):
Stacked intermediate logits (logits of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked initial reference points (initial reference points of each layer of the decoder).
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_logits: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
intermediate_predicted_corners: Optional[torch.FloatTensor] = None
initial_reference_points: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:
"""
Generates the non-uniform Weighting Function W(n) for bounding box regression.
Args:
max_num_bins (int): Max number of the discrete bins.
up (Tensor): Controls upper bounds of the sequence,
where maximum offset is ±up * H / W.
reg_scale (float): Controls the curvature of the Weighting Function.
Larger values result in flatter weights near the central axis W(max_num_bins/2)=0
and steeper weights at both ends.
Returns:
Tensor: Sequence of Weighting Function.
"""
upper_bound1 = abs(up[0]) * abs(reg_scale)
upper_bound2 = abs(up[0]) * abs(reg_scale) * 2
step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))
left_values = [-((step) ** i) + 1 for i in range(max_num_bins // 2 - 1, 0, -1)]
right_values = [(step) ** i - 1 for i in range(1, max_num_bins // 2)]
values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2]
values = torch.cat(values, 0)
return values
def distance2bbox(points, distance: torch.Tensor, reg_scale: float) -> torch.Tensor:
"""
Decodes edge-distances into bounding box coordinates.
Args:
points (`torch.Tensor`):
(batch_size, num_boxes, 4) or (num_boxes, 4) format, representing [x_center, y_center, width, height]
distance (`torch.Tensor`):
(batch_size, num_boxes, 4) or (num_boxes, 4), representing distances from the point to the left, top, right, and bottom boundaries.
reg_scale (`float`):
Controls the curvature of the Weighting Function.
Returns:
`torch.Tensor`: Bounding boxes in (batch_size, num_boxes, 4) or (num_boxes, 4) format, representing [x_center, y_center, width, height]
"""
reg_scale = abs(reg_scale)
top_left_x = points[..., 0] - (0.5 * reg_scale + distance[..., 0]) * (points[..., 2] / reg_scale)
top_left_y = points[..., 1] - (0.5 * reg_scale + distance[..., 1]) * (points[..., 3] / reg_scale)
bottom_right_x = points[..., 0] + (0.5 * reg_scale + distance[..., 2]) * (points[..., 2] / reg_scale)
bottom_right_y = points[..., 1] + (0.5 * reg_scale + distance[..., 3]) * (points[..., 3] / reg_scale)
bboxes = torch.stack([top_left_x, top_left_y, bottom_right_x, bottom_right_y], -1)
return corners_to_center_format(bboxes)
| DFineDecoderOutput |
python | wandb__wandb | wandb/vendor/pygments/lexers/scripting.py | {
"start": 5313,
"end": 7752
} | class ____(LuaLexer):
"""
For `MoonScript <http://moonscript.org>`_ source code.
.. versionadded:: 1.5
"""
name = "MoonScript"
aliases = ["moon", "moonscript"]
filenames = ["*.moon"]
mimetypes = ['text/x-moonscript', 'application/x-moonscript']
tokens = {
'root': [
(r'#!(.*?)$', Comment.Preproc),
default('base'),
],
'base': [
('--.*$', Comment.Single),
(r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
(r'(?i)\d+e[+-]?\d+', Number.Float),
(r'(?i)0x[0-9a-f]*', Number.Hex),
(r'\d+', Number.Integer),
(r'\n', Text),
(r'[^\S\n]+', Text),
(r'(?s)\[(=*)\[.*?\]\1\]', String),
(r'(->|=>)', Name.Function),
(r':[a-zA-Z_]\w*', Name.Variable),
(r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator),
(r'[;,]', Punctuation),
(r'[\[\]{}()]', Keyword.Type),
(r'[a-zA-Z_]\w*:', Name.Variable),
(words((
'class', 'extends', 'if', 'then', 'super', 'do', 'with',
'import', 'export', 'while', 'elseif', 'return', 'for', 'in',
'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch',
'break'), suffix=r'\b'),
Keyword),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(and|or|not)\b', Operator.Word),
(r'(self)\b', Name.Builtin.Pseudo),
(r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class),
(r'[A-Z]\w*', Name.Class), # proper name
(r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),
("'", String.Single, combined('stringescape', 'sqs')),
('"', String.Double, combined('stringescape', 'dqs'))
],
'stringescape': [
(r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
],
'sqs': [
("'", String.Single, '#pop'),
(".", String)
],
'dqs': [
('"', String.Double, '#pop'),
(".", String)
]
}
def get_tokens_unprocessed(self, text):
# set . as Operator instead of Punctuation
for index, token, value in LuaLexer.get_tokens_unprocessed(self, text):
if token == Punctuation and value == ".":
token = Operator
yield index, token, value
| MoonScriptLexer |
python | django__django | tests/decorators/test_http.py | {
"start": 6000,
"end": 7532
} | class ____(SimpleTestCase):
def test_wrapped_sync_function_is_not_coroutine_function(self):
def sync_view(request):
return HttpResponse()
wrapped_view = conditional_page(sync_view)
self.assertIs(iscoroutinefunction(wrapped_view), False)
def test_wrapped_async_function_is_coroutine_function(self):
async def async_view(request):
return HttpResponse()
wrapped_view = conditional_page(async_view)
self.assertIs(iscoroutinefunction(wrapped_view), True)
def test_conditional_page_decorator_successful(self):
@conditional_page
def sync_view(request):
response = HttpResponse()
response.content = b"test"
response["Cache-Control"] = "public"
return response
request = HttpRequest()
request.method = "GET"
response = sync_view(request)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.get("Etag"))
async def test_conditional_page_decorator_successful_async_view(self):
@conditional_page
async def async_view(request):
response = HttpResponse()
response.content = b"test"
response["Cache-Control"] = "public"
return response
request = HttpRequest()
request.method = "GET"
response = await async_view(request)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.get("Etag"))
| ConditionalPageTests |
python | kamyu104__LeetCode-Solutions | Python/game-of-nim.py | {
"start": 47,
"end": 223
} | class ____(object):
def nimGame(self, piles):
"""
:type piles: List[int]
:rtype: bool
"""
return reduce(operator.xor, piles, 0)
| Solution |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 68606,
"end": 73089
} | class ____:
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
if not NUMPY_LT_2_0:
def test_unique_all(self):
values, indices, inverse_indices, counts = np.unique(
self.q,
return_index=True,
return_inverse=True,
return_counts=True,
equal_nan=False,
)
res = np.unique_all(self.q)
assert len(res) == 4
assert_array_equal(res.values, values)
assert_array_equal(res.indices, indices)
assert_array_equal(res.inverse_indices, inverse_indices)
assert_array_equal(res.counts, counts)
def test_unique_counts(self):
values, counts = np.unique(self.q, return_counts=True, equal_nan=False)
res = np.unique_counts(self.q)
assert len(res) == 2
assert_array_equal(res.values, values)
assert_array_equal(res.counts, counts)
def test_unique_inverse(self):
values, inverse_indices = np.unique(
self.q, return_inverse=True, equal_nan=False
)
res = np.unique_inverse(self.q)
assert len(res) == 2
assert_array_equal(res.values, values)
assert_array_equal(res.inverse_indices, inverse_indices)
def test_unique_values(self):
values = np.unique(self.q, equal_nan=False)
res = np.unique_values(self.q)
assert_array_equal(res, values)
@pytest.mark.parametrize("kwargs", ({}, dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
def test_setxor1d(self):
self.check2(np.setxor1d)
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@pytest.mark.skipif(not NUMPY_LT_2_4, reason="in1d was removed in numpy 2.4")
@pytest.mark.filterwarnings("ignore:`in1d` is deprecated. Use `np.isin` instead.")
def test_in1d(self):
self.check2(np.in1d, unit=None) # noqa: NPY201
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2) # noqa: NPY201
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2) # noqa: NPY201
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
| TestSetOpsFunctions |
python | tensorflow__tensorflow | tensorflow/python/module/module_test.py | {
"start": 13791,
"end": 13964
} | class ____(ReturnsNameScopeModule):
@module.Module.with_name_scope
def alternative_alternative_forward(self):
return get_name_scope()
| SubclassedReturnsNameScopeModule |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.