language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
lazyprogrammer__machine_learning_examples
|
nlp_class2/rntn_tensorflow.py
|
{
"start": 1079,
"end": 7953
}
|
class ____:
    """Recursive Neural Tensor Network (RNTN) for sentiment classification
    over parse trees, built with the TensorFlow 1.x graph API.

    Because every tree has a different shape, one computation sub-graph is
    compiled per tree; ``fit``/``score`` first build all ops, then run them
    inside a single session.

    Parameters
    ----------
    V : int
        Vocabulary size.
    D : int
        Embedding / hidden dimension.
    K : int
        Number of output classes.
    activation : callable
        Hidden-layer nonlinearity (e.g. ``tf.nn.relu``).
    """

    def __init__(self, V, D, K, activation):
        self.D = D
        self.f = activation
        # word embedding
        We = init_weight(V, D)
        # quadratic terms: one (D, D, D) tensor per (left, right) pairing
        W11 = np.random.randn(D, D, D) / np.sqrt(3*D)
        W22 = np.random.randn(D, D, D) / np.sqrt(3*D)
        W12 = np.random.randn(D, D, D) / np.sqrt(3*D)
        # linear terms
        W1 = init_weight(D, D)
        W2 = init_weight(D, D)
        # bias
        bh = np.zeros(D)
        # output layer
        Wo = init_weight(D, K)
        bo = np.zeros(K)
        # make them tensorflow variables
        self.We = tf.Variable(We.astype(np.float32))
        self.W11 = tf.Variable(W11.astype(np.float32))
        self.W22 = tf.Variable(W22.astype(np.float32))
        self.W12 = tf.Variable(W12.astype(np.float32))
        self.W1 = tf.Variable(W1.astype(np.float32))
        self.W2 = tf.Variable(W2.astype(np.float32))
        self.bh = tf.Variable(bh.astype(np.float32))
        self.Wo = tf.Variable(Wo.astype(np.float32))
        self.bo = tf.Variable(bo.astype(np.float32))
        # NOTE: the biases are deliberately excluded — only weights are
        # L2-regularized in get_cost().
        self.params = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.Wo]

    def fit(self, trees, lr=1e-2, mu=0.9, reg=1e-1, epochs=5):
        """Compile one train op per tree, run `epochs` shuffled passes over
        them, checkpoint the model, and plot cost / accuracy curves.

        Parameters: trees (list of parse trees), lr (learning rate),
        mu (momentum), reg (L2 coefficient), epochs (number of passes).
        """
        train_ops = []
        costs = []
        predictions = []
        all_labels = []
        i = 0
        N = len(trees)
        print("Compiling ops")
        for t in trees:
            i += 1
            sys.stdout.write("%d/%d\r" % (i, N))
            sys.stdout.flush()
            logits = self.get_output(t)
            labels = get_labels(t)
            all_labels.append(labels)
            cost = self.get_cost(logits, labels, reg)
            costs.append(cost)
            prediction = tf.argmax(logits, 1)
            predictions.append(prediction)
            train_op = tf.train.MomentumOptimizer(lr, mu).minimize(cost)
            train_ops.append(train_op)

        # save for later so we don't have to recompile if we call score
        self.predictions = predictions
        self.all_labels = all_labels

        self.saver = tf.train.Saver()
        init = tf.initialize_all_variables()

        actual_costs = []
        per_epoch_costs = []
        correct_rates = []
        with tf.Session() as session:
            session.run(init)
            for i in range(epochs):
                # shuffle the four parallel lists together so each op stays
                # aligned with its cost / prediction / labels
                train_ops, costs, predictions, all_labels = shuffle(train_ops, costs, predictions, all_labels)
                epoch_cost = 0
                n_correct = 0
                n_total = 0
                j = 0
                N = len(train_ops)
                for train_op, cost, prediction, labels in zip(train_ops, costs, predictions, all_labels):
                    _, c, p = session.run([train_op, cost, prediction])
                    epoch_cost += c
                    actual_costs.append(c)
                    n_correct += np.sum(p == labels)
                    n_total += len(labels)
                    j += 1
                    if j % 10 == 0:
                        sys.stdout.write("j: %d, N: %d, c: %f\r" % (j, N, c))
                        sys.stdout.flush()
                    # bail out immediately if training diverged
                    if np.isnan(c):
                        exit()
                per_epoch_costs.append(epoch_cost)
                correct_rates.append(n_correct / float(n_total))
            self.save_path = self.saver.save(session, "tf_model.ckpt")

        plt.plot(actual_costs)
        plt.title("cost per train_op call")
        plt.show()
        plt.plot(per_epoch_costs)
        plt.title("per epoch costs")
        plt.show()
        plt.plot(correct_rates)
        plt.title("correct rates")
        plt.show()

    def get_cost(self, logits, labels, reg):
        """Mean cross-entropy over all tree nodes plus an L2 penalty
        (weight `reg`) on every tensor in self.params."""
        cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels))
        rcost = sum(tf.nn.l2_loss(p) for p in self.params)
        cost += reg*rcost
        return cost

    # list_of_logits is an output!
    # it is added to using post-order traversal
    def get_output_recursive(self, tree, list_of_logits, is_root=True):
        """Return the hidden vector for `tree`, appending each node's logits
        to `list_of_logits` in post-order (so the root's logits come last)."""
        if tree.word is not None:
            # this is a leaf node: look up its word embedding
            x = tf.nn.embedding_lookup(self.We, [tree.word])
        else:
            # internal node: combine the children's hidden vectors with
            # quadratic (tensor) terms, linear terms, and a bias
            x1 = self.get_output_recursive(tree.left, list_of_logits, is_root=False)
            x2 = self.get_output_recursive(tree.right, list_of_logits, is_root=False)
            x = self.f(
                tensor_mul(self.D, x1, self.W11, x1) +
                tensor_mul(self.D, x2, self.W22, x2) +
                tensor_mul(self.D, x1, self.W12, x2) +
                tf.matmul(x1, self.W1) +
                tf.matmul(x2, self.W2) +
                self.bh)
        logits = tf.matmul(x, self.Wo) + self.bo
        list_of_logits.append(logits)
        return x

    def get_output(self, tree):
        """Build and return the stacked logits op for every node of `tree`."""
        logits = []
        try:
            self.get_output_recursive(tree, logits)
        except Exception as e:
            display_tree(tree)
            raise e
        return tf.concat(0, logits)

    def score(self, trees):
        """Return root-level accuracy.

        Pass ``trees=None`` to reuse the ops compiled during fit() (i.e.
        score the training set without recompiling).
        """
        if trees is None:
            predictions = self.predictions
            all_labels = self.all_labels
        else:
            # just build and run the predict_op for each tree
            # and accumulate the total
            predictions = []
            all_labels = []
            i = 0
            N = len(trees)
            print("Compiling ops")
            for t in trees:
                i += 1
                sys.stdout.write("%d/%d\r" % (i, N))
                sys.stdout.flush()
                logits = self.get_output(t)
                labels = get_labels(t)
                all_labels.append(labels)
                prediction = tf.argmax(logits, 1)
                predictions.append(prediction)

        n_correct = 0
        n_total = 0
        with tf.Session() as session:
            self.saver.restore(session, "tf_model.ckpt")
            for prediction, y in zip(predictions, all_labels):
                p = session.run(prediction)
                n_correct += (p[-1] == y[-1])  # we only care about the root
                # BUG FIX: count one prediction per tree. The old code added
                # len(y) (every node in the tree) to the denominator while the
                # numerator only counted the root, deflating the accuracy.
                n_total += 1
        return float(n_correct) / n_total
def main():
    """Train an RNTN on a small slice of the PTB sentiment trees and
    print train / test root accuracy."""
    train, test, word2idx = get_ptb_data()
    # keep the demo fast: only use the first 100 trees of each split
    train = train[:100]
    test = test[:100]

    vocab_size = len(word2idx)
    embed_dim = 80
    n_classes = 5
    rntn = RNTN(vocab_size, embed_dim, n_classes, tf.nn.relu)
    rntn.fit(train)
    # score(None) reuses the ops compiled during fit (the training set)
    print("train accuracy:", rntn.score(None))
    print("test accuracy:", rntn.score(test))


if __name__ == '__main__':
    main()
|
RNTN
|
python
|
tiangolo__fastapi
|
scripts/notify_translations.py
|
{
"start": 1638,
"end": 1705
}
|
class ____(BaseModel):
    """Lightweight model of a comment.

    NOTE(review): the id / url / body fields look like they mirror a GitHub
    GraphQL comment node — confirm against the query that populates this.
    """
    # opaque node identifier
    id: str
    # permalink to the comment
    url: str
    # comment text
    body: str
|
Comment
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/views/private.py
|
{
"start": 29434,
"end": 29589
}
|
class ____(ProjectRedirectsMixin, DeleteViewWithMessage):
    """Delete a project redirect, flashing a success message afterwards."""
    # deletion is a state-changing action, so only POST is accepted
    http_method_names = ["post"]
    success_message = _("Redirect deleted")
|
ProjectRedirectsDelete
|
python
|
pytorch__pytorch
|
test/inductor/test_coordinate_descent_tuner.py
|
{
"start": 1449,
"end": 4084
}
|
class ____(TestCase):
    """Tests for the coordinate-descent Triton config tuner."""

    def test_abs_function(self):
        """Tune a synthetic benchmark whose cost is abs(XBLOCK - 15);
        the tuner should settle on the nearest power of two, 16."""
        tuner = CoordescTuner()
        seed = triton.Config({"XBLOCK": 1}, num_warps=8, num_stages=1)

        def benchmark(config):
            return abs(config.kwargs["XBLOCK"] - 15)

        best = tuner.autotune(benchmark, seed)
        self.assertTrue(best.kwargs.get("XBLOCK") == 16, str(best))

    def test_no_neighbors(self):
        """When a field has no admissible neighbour values, the baseline
        config must be returned unchanged."""
        # size hint for x being 1 limits the max XBLOCK we try to be 1
        tuner = CoordescTuner(size_hints={"x": 1})
        seed = triton.Config({"XBLOCK": 1}, num_warps=8, num_stages=1)

        def benchmark(config):
            return abs(config.kwargs["XBLOCK"] - 15)

        best = tuner.autotune(benchmark, seed)
        self.assertTrue(best.kwargs.get("XBLOCK") == 1, str(best))

    def test_get_neighbour_values(self):
        """Neighbour generation: num_stages steps linearly, num_warps
        steps by powers of two."""
        tuner = CoordescTuner()
        self.assertEqual(
            set(tuner.get_neighbour_values("num_stages", 2, radius=2)),
            {1, 3, 4},
        )
        self.assertEqual(
            set(tuner.get_neighbour_values("num_warps", 2, radius=2)),
            {1, 4, 8},
        )

    def test_persistent_reduction(self):
        def f(x):
            return x / x.sum(dim=-1, keepdim=True)

        with mock.patch.object(
            CoordescTuner, "compare_config", mock_compare_config_prefer_larger_XBLOCK
        ):
            inp = torch.ones(2, 256).to(GPU_TYPE)
            expected = f(inp)
            # the first call get correct result when cache miss. Don't know why yet
            _ = torch.compile(f)(inp)
            actual = torch.compile(f)(inp)
            self.assertTrue(
                torch.allclose(expected, actual, atol=1e-4, rtol=1e-4),
                f"Expected:\n{expected}\nActual:\n{actual}",
            )

    def test_value_too_large(self):
        """Block sizes above TRITON_MAX_BLOCK must be rejected."""
        # Simulate a reduction
        tuner = CoordescTuner(size_hints={"x": 2**20, "y": 2**20})
        limits = TRITON_MAX_BLOCK
        self.assertFalse(tuner.value_too_large("XBLOCK", limits["X"]))
        self.assertTrue(tuner.value_too_large("XBLOCK", limits["X"] * 2))
        self.assertFalse(tuner.value_too_large("R0_BLOCK", limits["R0_"]))
        self.assertTrue(tuner.value_too_large("R0_BLOCK", limits["R0_"] * 2))
if __name__ == "__main__":
    # Only run when executed directly, and only on Linux machines with a GPU
    # (the tuner exercises real Triton kernels).
    if IS_LINUX and HAS_GPU:
        run_tests()
|
TestCoordinateDescentTuner
|
python
|
huggingface__transformers
|
src/transformers/models/swin2sr/modeling_swin2sr.py
|
{
"start": 35092,
"end": 35858
}
|
class ____(nn.Module):
    """Single-step upsampling: one 3x3 convolution followed by a pixel
    shuffle (whereas the full Upsample module stacks several stages).
    Used in lightweight super-resolution to save parameters.

    Args:
        scale (int):
            Scale factor. Supported scales: 2^n and 3.
        in_channels (int):
            Channel number of intermediate features.
        out_channels (int):
            Channel number of output features.
    """

    def __init__(self, scale, in_channels, out_channels):
        super().__init__()
        # produce scale^2 * out_channels maps so PixelShuffle can fold the
        # extra channels into an (H*scale, W*scale) spatial grid
        self.conv = nn.Conv2d(in_channels, (scale**2) * out_channels, 3, 1, 1)
        self.pixel_shuffle = nn.PixelShuffle(scale)

    def forward(self, x):
        return self.pixel_shuffle(self.conv(x))
|
UpsampleOneStep
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/_mathtext.py
|
{
"start": 55615,
"end": 62570
}
|
class ____(Hlist):
    """
    A character rendered as close to the requested width as possible.

    If the fontset provides multiple width variants of the glyph (as the
    BaKoMa fonts do), the narrowest variant that is at least *width* wide is
    preferred; in every case the chosen glyph is then rescaled so its width
    matches *width* exactly.
    """

    def __init__(self, c: str, width: float, state: ParserState, always: bool = False,
                 char_class: type[Char] = Char):
        candidates = state.fontset.get_sized_alternatives_for_symbol(state.font, c)
        state = state.copy()  # don't mutate the caller's parser state
        for fontname, sym in candidates:
            state.font = fontname
            char = char_class(sym, state)
            if char.width >= width:
                break
        # Scale the last candidate examined so it hits the target width.
        state.fontsize *= width / char.width
        char = char_class(sym, state)
        super().__init__([char])
        self.width = char.width
def ship(box: Box, xy: tuple[float, float] = (0, 0)) -> Output:
    """
    Ship out *box* at offset *xy*, converting it to an `Output`.

    Since boxes can be inside of boxes inside of boxes, the main work of `ship`
    is done by two mutually recursive routines, `hlist_out` and `vlist_out`,
    which traverse the `Hlist` nodes and `Vlist` nodes inside of horizontal
    and vertical boxes. The global variables used in TeX to store state as it
    processes have become local variables here.
    """
    ox, oy = xy
    # Current pen position (TeX's cur_h/cur_v) and the fixed page offsets.
    cur_v = 0.
    cur_h = 0.
    off_h = ox
    off_v = oy + box.height
    output = Output(box)

    def clamp(value: float) -> float:
        # Keep glue arithmetic finite, mirroring TeX's clamping of runaway glue.
        return -1e9 if value < -1e9 else +1e9 if value > +1e9 else value

    def hlist_out(box: Hlist) -> None:
        # Render the children of an Hlist left-to-right from the current pen
        # position; shares cur_h/cur_v with vlist_out via nonlocal.
        nonlocal cur_v, cur_h
        cur_g = 0          # glue set so far, rounded
        cur_glue = 0.      # accumulated stretch/shrink of matching order
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        base_line = cur_v
        left_edge = cur_h
        for p in box.children:
            if isinstance(p, Char):
                p.render(output, cur_h + off_h, cur_v + off_v)
                cur_h += p.width
            elif isinstance(p, Kern):
                cur_h += p.width
            elif isinstance(p, List):
                # node623
                if len(p.children) == 0:
                    cur_h += p.width
                else:
                    # Recurse into the sublist, shifted off the baseline.
                    edge = cur_h
                    cur_v = base_line + p.shift_amount
                    if isinstance(p, Hlist):
                        hlist_out(p)
                    elif isinstance(p, Vlist):
                        # p.vpack(box.height + box.depth, 'exactly')
                        vlist_out(p)
                    else:
                        assert False, "unreachable code"
                    cur_h = edge + p.width
                    cur_v = base_line
            elif isinstance(p, Box):
                # node624: a rule; infinite dimensions inherit from the
                # enclosing hlist.
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                if np.isinf(rule_height):
                    rule_height = box.height
                if np.isinf(rule_depth):
                    rule_depth = box.depth
                if rule_height > 0 and rule_width > 0:
                    cur_v = base_line + rule_depth
                    p.render(output,
                             cur_h + off_h, cur_v + off_v,
                             rule_width, rule_height)
                    cur_v = base_line
                cur_h += rule_width
            elif isinstance(p, Glue):
                # node625: glue advances the pen by its natural width plus
                # any stretch/shrink the enclosing box assigned.
                glue_spec = p.glue_spec
                rule_width = glue_spec.width - cur_g
                if glue_sign != 0:  # normal
                    if glue_sign == 1:  # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(box.glue_set * cur_glue))
                    elif glue_spec.shrink_order == glue_order:
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(box.glue_set * cur_glue))
                rule_width += cur_g
                cur_h += rule_width

    def vlist_out(box: Vlist) -> None:
        # Render the children of a Vlist top-to-bottom; Chars may not appear
        # directly inside a vlist.
        nonlocal cur_v, cur_h
        cur_g = 0
        cur_glue = 0.
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        left_edge = cur_h
        cur_v -= box.height
        top_edge = cur_v
        for p in box.children:
            if isinstance(p, Kern):
                cur_v += p.width
            elif isinstance(p, List):
                if len(p.children) == 0:
                    cur_v += p.height + p.depth
                else:
                    cur_v += p.height
                    cur_h = left_edge + p.shift_amount
                    save_v = cur_v
                    p.width = box.width
                    if isinstance(p, Hlist):
                        hlist_out(p)
                    elif isinstance(p, Vlist):
                        vlist_out(p)
                    else:
                        assert False, "unreachable code"
                    cur_v = save_v + p.depth
                    cur_h = left_edge
            elif isinstance(p, Box):
                # A rule; an infinite width inherits the enclosing vlist's.
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                if np.isinf(rule_width):
                    rule_width = box.width
                rule_height += rule_depth
                if rule_height > 0 and rule_depth > 0:
                    cur_v += rule_height
                    p.render(output,
                             cur_h + off_h, cur_v + off_v,
                             rule_width, rule_height)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                rule_height = glue_spec.width - cur_g
                if glue_sign != 0:  # normal
                    if glue_sign == 1:  # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(box.glue_set * cur_glue))
                    elif glue_spec.shrink_order == glue_order:  # shrinking
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(box.glue_set * cur_glue))
                rule_height += cur_g
                cur_v += rule_height
            elif isinstance(p, Char):
                raise RuntimeError(
                    "Internal mathtext error: Char node found in vlist")

    # The top-level box is always horizontal.
    assert isinstance(box, Hlist)
    hlist_out(box)
    return output
##############################################################################
# PARSER
def Error(msg: str) -> ParserElement:
    """Return a parser element whose only action is to raise a fatal
    parse error carrying *msg*."""
    def _raise(s: str, loc: int, toks: ParseResults) -> T.Any:
        raise ParseFatalException(s, loc, msg)
    return Empty().set_parse_action(_raise)
|
AutoWidthChar
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/ternary/_caxis.py
|
{
"start": 235,
"end": 53738
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary"
_path_str = "layout.ternary.caxis"
_valid_props = {
"color",
"dtick",
"exponentformat",
"gridcolor",
"griddash",
"gridwidth",
"hoverformat",
"labelalias",
"layer",
"linecolor",
"linewidth",
"min",
"minexponent",
"nticks",
"separatethousands",
"showexponent",
"showgrid",
"showline",
"showticklabels",
"showtickprefix",
"showticksuffix",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"uirevision",
}
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def layer(self):
"""
Sets the layer on which this axis is displayed. If *above
traces*, this axis is displayed above all the subplot's traces
If *below traces*, this axis is displayed below all the
subplot's traces, but above the grid lines. Useful when used
together with scatter-like traces with `cliponaxis` set to
False to show markers and/or text nodes above this axis.
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['above traces', 'below traces']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
@property
def min(self):
"""
The minimum value visible on this axis. The maximum is
determined by the sum minus the minimum values of the other two
axes. The full view corresponds to all the minima set to zero.
The 'min' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["min"]
@min.setter
def min(self, val):
self["min"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.caxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.layout.ternary.caxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.ternary.caxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.layout.ternary.caxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.layout.ternary.caxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.ternary.caxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.caxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.layout.ternary.caxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.caxis.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.layout.ternary.caxis.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def uirevision(self):
"""
Controls persistence of user-driven changes in axis `min`, and
`title` if in `editable: true` configuration. Defaults to
`ternary<N>.uirevision`.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
min
The minimum value visible on this axis. The maximum is
determined by the sum minus the minimum values of the
other two axes. The full view corresponds to all the
minima set to zero.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.ternary.
caxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.layout.tern
ary.caxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.ternary.caxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.ternary.caxis.Title
` instance or dict with compatible properties
uirevision
Controls persistence of user-driven changes in axis
`min`, and `title` if in `editable: true`
configuration. Defaults to `ternary<N>.uirevision`.
"""
def __init__(
self,
arg=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
griddash=None,
gridwidth=None,
hoverformat=None,
labelalias=None,
layer=None,
linecolor=None,
linewidth=None,
min=None,
minexponent=None,
nticks=None,
separatethousands=None,
showexponent=None,
showgrid=None,
showline=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
uirevision=None,
**kwargs,
):
"""
Construct a new Caxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.ternary.Caxis`
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
min
The minimum value visible on this axis. The maximum is
determined by the sum minus the minimum values of the
other two axes. The full view corresponds to all the
minima set to zero.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.ternary.
caxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.layout.tern
ary.caxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.ternary.caxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.ternary.caxis.Title
` instance or dict with compatible properties
uirevision
Controls persistence of user-driven changes in axis
`min`, and `title` if in `editable: true`
configuration. Defaults to `ternary<N>.uirevision`.
Returns
-------
Caxis
"""
super().__init__("caxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.Caxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.Caxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("hoverformat", arg, hoverformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("layer", arg, layer)
self._set_property("linecolor", arg, linecolor)
self._set_property("linewidth", arg, linewidth)
self._set_property("min", arg, min)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showgrid", arg, showgrid)
self._set_property("showline", arg, showline)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("uirevision", arg, uirevision)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Caxis
|
python
|
pytorch__pytorch
|
test/distributed/pipelining/test_pipe.py
|
{
"start": 2025,
"end": 3442
}
|
class ____(TestCase):
@parametrize("ModelClass", [ExampleCode, MultiMLP, ModelWithParamAlias])
def test_model_split(self, ModelClass):
mod = ModelClass()
x = torch.randn(microbatch_size, d_hid)
y = torch.randn(microbatch_size, d_hid)
pipe = pipeline(
mod,
mb_args=(x, y),
)
assert pipe.num_stages == EXPECTED_N_STAGES[ModelClass], (
f"nstages = {pipe.num_stages}, expect {EXPECTED_N_STAGES[ModelClass]}"
)
ref_out = mod(x, y)
out = pipe(x, y)[0]
torch.testing.assert_close(out, ref_out)
print(f"equivalence test passed {torch.sum(out)} ref {torch.sum(ref_out)}")
# Check qualname
# state_dict.keys include both parameters and persistent buffers
old_names = set(mod.state_dict().keys())
new_names = set()
for idx in range(pipe.num_stages):
stage_mod = pipe.get_stage_module(idx)
stage_fqns = set(stage_mod.state_dict().keys())
assert stage_fqns.issubset(old_names)
new_names.update(stage_fqns)
if CHECK_FQN_SET_EQUALITY:
assert old_names == new_names, f"""
old names {old_names}
new names {new_names}
"""
print("Qualname check passed")
instantiate_parametrized_tests(PipeTests)
if __name__ == "__main__":
run_tests()
|
PipeTests
|
python
|
ansible__ansible
|
lib/ansible/playbook/included_file.py
|
{
"start": 1378,
"end": 12027
}
|
class ____:
def __init__(self, filename, args, vars, task, is_role: bool = False) -> None:
self._filename = filename
self._args = args
self._vars = vars
self._task = task
self._hosts: list[Host] = []
self._is_role = is_role
self._results: list[_RawTaskResult] = []
def add_host(self, host: Host) -> None:
if host not in self._hosts:
self._hosts.append(host)
return
raise ValueError()
def __eq__(self, other):
if not isinstance(other, IncludedFile):
return False
return (other._filename == self._filename and
other._args == self._args and
other._vars == self._vars and
other._task._uuid == self._task._uuid and
other._task._parent._uuid == self._task._parent._uuid)
def __repr__(self):
return "%s (args=%s vars=%s): %s" % (self._filename, self._args, self._vars, self._hosts)
@staticmethod
def process_include_results(
results: list[_RawTaskResult],
iterator,
loader: DataLoader,
variable_manager: VariableManager,
) -> list[IncludedFile]:
included_files: list[IncludedFile] = []
task_vars_cache: dict[tuple, dict] = {}
for res in results:
original_host = res.host
original_task = res.task
if original_task.action in C._ACTION_ALL_INCLUDES:
if original_task.loop:
if 'results' not in res._return_data:
continue
include_results = res._loop_results
else:
include_results = [res._return_data]
for include_result in include_results:
# if the task result was skipped or failed, continue
if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result and include_result['failed']:
continue
cache_key = (iterator._play, original_host, original_task)
try:
task_vars = task_vars_cache[cache_key]
except KeyError:
task_vars = task_vars_cache[cache_key] = variable_manager.get_vars(play=iterator._play, host=original_host, task=original_task)
include_args = include_result.pop('include_args', dict())
special_vars = {}
loop_var = include_result.get('ansible_loop_var', 'item')
index_var = include_result.get('ansible_index_var')
if loop_var in include_result:
task_vars[loop_var] = special_vars[loop_var] = include_result[loop_var]
task_vars['ansible_loop_var'] = special_vars['ansible_loop_var'] = loop_var
if index_var and index_var in include_result:
task_vars[index_var] = special_vars[index_var] = include_result[index_var]
task_vars['ansible_index_var'] = special_vars['ansible_index_var'] = index_var
if '_ansible_item_label' in include_result:
task_vars['_ansible_item_label'] = special_vars['_ansible_item_label'] = include_result['_ansible_item_label']
if 'ansible_loop' in include_result:
task_vars['ansible_loop'] = special_vars['ansible_loop'] = include_result['ansible_loop']
if original_task.no_log and '_ansible_no_log' not in include_args:
task_vars['_ansible_no_log'] = special_vars['_ansible_no_log'] = original_task.no_log
# get search path for this task to pass to lookup plugins that may be used in pathing to
# the included file
task_vars['ansible_search_path'] = original_task.get_search_path()
# ensure basedir is always in (dwim already searches here but we need to display it)
if loader.get_basedir() not in task_vars['ansible_search_path']:
task_vars['ansible_search_path'].append(loader.get_basedir())
templar = TemplateEngine(loader=loader, variables=task_vars)
if original_task.action in C._ACTION_INCLUDE_TASKS:
include_file = None
if original_task._parent:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = original_task._parent
cumulative_path = None
while parent_include is not None:
if not isinstance(parent_include, TaskInclude):
parent_include = parent_include._parent
continue
if isinstance(parent_include, IncludeRole):
parent_include_dir = parent_include._role_path
else:
try:
# FUTURE: Since the parent include path has already been resolved, it should be used here.
# Unfortunately it's not currently stored anywhere, so it must be calculated again.
parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
except AnsibleError as e:
parent_include_dir = ''
display.warning(
'Templating the path of the parent %s failed. The path to the '
'included file may not be found. '
'The error was: %s.' % (original_task.action, to_text(e))
)
if cumulative_path is not None and not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
else:
cumulative_path = parent_include_dir
include_target = include_result['include']
if original_task._role:
dirname = 'handlers' if isinstance(original_task, Handler) else 'tasks'
new_basedir = os.path.join(original_task._role._role_path, dirname, cumulative_path)
candidates = [
loader.path_dwim_relative(original_task._role._role_path, dirname, include_target, is_role=True),
loader.path_dwim_relative(new_basedir, dirname, include_target, is_role=True)
]
for include_file in candidates:
try:
# may throw OSError
os.stat(include_file)
# or select the task file if it exists
break
except OSError:
pass
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
if os.path.exists(include_file):
break
else:
parent_include = parent_include._parent
if include_file is None:
if original_task._role:
include_target = include_result['include']
include_file = loader.path_dwim_relative(
original_task._role._role_path,
'handlers' if isinstance(original_task, Handler) else 'tasks',
include_target,
is_role=True)
else:
include_file = loader.path_dwim(include_result['include'])
inc_file = IncludedFile(include_file, include_args, special_vars, original_task)
else:
# template the included role's name here
role_name = include_args.pop('name', include_args.pop('role', None))
new_task = original_task.copy()
new_task.post_validate(templar=templar)
new_task._role_name = role_name
for from_arg in new_task.FROM_ARGS:
if from_arg in include_args:
from_key = from_arg.removesuffix('_from')
new_task._from_files[from_key] = include_args.get(from_arg)
inc_file = IncludedFile(role_name, include_args, special_vars, new_task, is_role=True)
idx = 0
orig_inc_file = inc_file
while 1:
try:
pos = included_files[idx:].index(orig_inc_file)
# pos is relative to idx since we are slicing
# use idx + pos due to relative indexing
inc_file = included_files[idx + pos]
except ValueError:
included_files.append(orig_inc_file)
inc_file = orig_inc_file
try:
inc_file.add_host(original_host)
inc_file._results.append(res)
except ValueError:
# The host already exists for this include, advance forward, this is a new include
idx += pos + 1
else:
break
return included_files
|
IncludedFile
|
python
|
openai__openai-python
|
src/openai/types/vector_store_search_params.py
|
{
"start": 445,
"end": 1044
}
|
class ____(TypedDict, total=False):
query: Required[Union[str, SequenceNotStr[str]]]
"""A query string for a search"""
filters: Filters
"""A filter to apply based on file attributes."""
max_num_results: int
"""The maximum number of results to return.
This number should be between 1 and 50 inclusive.
"""
ranking_options: RankingOptions
"""Ranking options for search."""
rewrite_query: bool
"""Whether to rewrite the natural language query for vector search."""
Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
|
VectorStoreSearchParams
|
python
|
numpy__numpy
|
numpy/polynomial/tests/test_chebyshev.py
|
{
"start": 17937,
"end": 20629
}
|
class ____:
def test_chebfromroots(self):
res = cheb.chebfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
tgt = [0] * i + [1]
res = cheb.chebfromroots(roots) * 2**(i - 1)
assert_almost_equal(trim(res), trim(tgt))
def test_chebroots(self):
assert_almost_equal(cheb.chebroots([1]), [])
assert_almost_equal(cheb.chebroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = cheb.chebroots(cheb.chebfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_chebtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, cheb.chebtrim, coef, -1)
# Test results
assert_equal(cheb.chebtrim(coef), coef[:-1])
assert_equal(cheb.chebtrim(coef, 1), coef[:-3])
assert_equal(cheb.chebtrim(coef, 2), [0])
def test_chebline(self):
assert_equal(cheb.chebline(3, 4), [3, 4])
def test_cheb2poly(self):
for i in range(10):
assert_almost_equal(cheb.cheb2poly([0] * i + [1]), Tlist[i])
def test_poly2cheb(self):
for i in range(10):
assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0] * i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)[1:-1]
tgt = 1. / (np.sqrt(1 + x) * np.sqrt(1 - x))
res = cheb.chebweight(x)
assert_almost_equal(res, tgt)
def test_chebpts1(self):
# test exceptions
assert_raises(ValueError, cheb.chebpts1, 1.5)
assert_raises(ValueError, cheb.chebpts1, 0)
# test points
tgt = [0]
assert_almost_equal(cheb.chebpts1(1), tgt)
tgt = [-0.70710678118654746, 0.70710678118654746]
assert_almost_equal(cheb.chebpts1(2), tgt)
tgt = [-0.86602540378443871, 0, 0.86602540378443871]
assert_almost_equal(cheb.chebpts1(3), tgt)
tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
assert_almost_equal(cheb.chebpts1(4), tgt)
def test_chebpts2(self):
# test exceptions
assert_raises(ValueError, cheb.chebpts2, 1.5)
assert_raises(ValueError, cheb.chebpts2, 1)
# test points
tgt = [-1, 1]
assert_almost_equal(cheb.chebpts2(2), tgt)
tgt = [-1, 0, 1]
assert_almost_equal(cheb.chebpts2(3), tgt)
tgt = [-1, -0.5, .5, 1]
assert_almost_equal(cheb.chebpts2(4), tgt)
tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
assert_almost_equal(cheb.chebpts2(5), tgt)
|
TestMisc
|
python
|
django-extensions__django-extensions
|
django_extensions/collision_resolvers.py
|
{
"start": 6262,
"end": 6575
}
|
class ____(AppNameSuffixCR, InstalledAppsOrderCR):
"""
Collision resolver which is mixin of AppNameSuffixCR and InstalledAppsOrderCR.
In case of collisions he sets aliases like AppNameSuffixCR, but sets default model using InstalledAppsOrderCR.
""" # noqa: E501
pass
|
AppNameSuffixCustomOrderCR
|
python
|
numba__numba
|
numba/tests/test_optional.py
|
{
"start": 1140,
"end": 6502
}
|
class ____(TestCase):
_numba_parallel_test_ = False
def test_return_double_or_none(self):
pyfunc = return_double_or_none
cfunc = njit((types.boolean,))(pyfunc)
for v in [True, False]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_return_different_statement(self):
pyfunc = return_different_statement
cfunc = njit((types.boolean,))(pyfunc)
for v in [True, False]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_return_bool_optional_or_none(self):
pyfunc = return_bool_optional_or_none
cfunc = njit((types.int32, types.int32,))(pyfunc)
for x, y in itertools.product((0, 1, 2), (0, 1)):
self.assertPreciseEqual(pyfunc(x, y), cfunc(x, y))
def test_is_this_a_none(self):
pyfunc = is_this_a_none
cfunc = njit((types.intp,))(pyfunc)
for v in [-1, 0, 1, 2]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_is_this_a_none_objmode(self):
pyfunc = is_this_a_none
cfunc = jit((types.intp,), forceobj=True)(pyfunc)
self.assertTrue(cfunc.overloads[cfunc.signatures[0]].objectmode)
for v in [-1, 0, 1, 2]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_a_is_b_intp(self):
pyfunc = a_is_b
cfunc = njit((types.intp, types.intp))(pyfunc)
# integer identity relies on `==`
self.assertTrue(cfunc(1, 1))
self.assertFalse(cfunc(1, 2))
def test_a_is_not_b_intp(self):
pyfunc = a_is_not_b
cfunc = njit((types.intp, types.intp))(pyfunc)
# integer identity relies on `==`
self.assertFalse(cfunc(1, 1))
self.assertTrue(cfunc(1, 2))
def test_optional_float(self):
def pyfunc(x, y):
if y is None:
return x
else:
return x + y
cfunc = njit("(float64, optional(float64))")(pyfunc)
self.assertAlmostEqual(pyfunc(1., 12.3), cfunc(1., 12.3))
self.assertAlmostEqual(pyfunc(1., None), cfunc(1., None))
def test_optional_array(self):
def pyfunc(x, y):
if y is None:
return x
else:
y[0] += x
return y[0]
cfunc = njit("(float32, optional(float32[:]))")(pyfunc)
cy = np.array([12.3], dtype=np.float32)
py = cy.copy()
self.assertAlmostEqual(pyfunc(1., py), cfunc(1., cy))
np.testing.assert_almost_equal(py, cy)
self.assertAlmostEqual(pyfunc(1., None), cfunc(1., None))
def test_optional_array_error(self):
def pyfunc(y):
return y[0]
cfunc = njit("(optional(int32[:]),)")(pyfunc)
with self.assertRaises(TypeError) as raised:
cfunc(None)
self.assertIn('expected array(int32, 1d, A), got None',
str(raised.exception))
y = np.array([0xabcd], dtype=np.int32)
self.assertEqual(cfunc(y), pyfunc(y))
def test_optional_array_attribute(self):
"""
Check that we can access attribute of an optional
"""
def pyfunc(arr, do_it):
opt = None
if do_it: # forces `opt` to be an optional of arr
opt = arr
return opt.shape[0]
cfunc = njit(pyfunc)
arr = np.arange(5)
self.assertEqual(pyfunc(arr, True), cfunc(arr, True))
def test_assign_to_optional(self):
"""
Check that we can assign to a variable of optional type
"""
@njit
def make_optional(val, get_none):
if get_none:
ret = None
else:
ret = val
return ret
@njit
def foo(val, run_second):
a = make_optional(val, True)
if run_second:
a = make_optional(val, False)
return a
self.assertIsNone(foo(123, False))
self.assertEqual(foo(231, True), 231)
def test_optional_thru_omitted_arg(self):
"""
Issue 1868
"""
def pyfunc(x=None):
if x is None:
x = 1
return x
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
self.assertEqual(pyfunc(3), cfunc(3))
def test_optional_unpack(self):
"""
Issue 2171
"""
def pyfunc(x):
if x is None:
return
else:
a, b = x
return a, b
tup = types.Tuple([types.intp] * 2)
opt_tup = types.Optional(tup)
sig = (opt_tup,)
cfunc = njit(sig)(pyfunc)
self.assertEqual(pyfunc(None), cfunc(None))
self.assertEqual(pyfunc((1, 2)), cfunc((1, 2)))
def test_many_optional_none_returns(self):
"""
Issue #4058
"""
@njit
def foo(maybe):
lx = None
if maybe:
lx = 10
return 1, lx
def work():
tmp = []
for _ in range(20000):
maybe = False
_ = foo(maybe)
# this caused "Fatal Python error: deallocating None" as there was no
# incref being made on the returned None.
work()
if __name__ == '__main__':
unittest.main()
|
TestOptional
|
python
|
huggingface__transformers
|
src/transformers/models/phi/modular_phi.py
|
{
"start": 12680,
"end": 12914
}
|
class ____(LlamaForTokenClassification):
pass
__all__ = [
"PhiPreTrainedModel", # noqa: F822
"PhiModel",
"PhiForCausalLM",
"PhiForSequenceClassification",
"PhiForTokenClassification",
]
|
PhiForTokenClassification
|
python
|
eventlet__eventlet
|
eventlet/greenthread.py
|
{
"start": 6605,
"end": 13370
}
|
class ____(greenlet.greenlet):
"""The GreenThread class is a type of Greenlet which has the additional
property of being able to retrieve the return value of the main function.
Do not construct GreenThread objects directly; call :func:`spawn` to get one.
"""
def __init__(self, parent):
greenlet.greenlet.__init__(self, self.main, parent)
self._exit_event = event.Event()
self._resolving_links = False
self._exit_funcs = None
def __await__(self):
"""
Enable ``GreenThread``s to be ``await``ed in ``async`` functions.
"""
from eventlet.hubs.asyncio import Hub
hub = hubs.get_hub()
if not isinstance(hub, Hub):
raise RuntimeError(
"This API only works with eventlet's asyncio hub. "
+ "To use it, set an EVENTLET_HUB=asyncio environment variable."
)
future = hub.loop.create_future()
# When the Future finishes, check if it was due to cancellation:
def got_future_result(future):
if future.cancelled() and not self.dead:
# GreenThread is still running, so kill it:
self.kill()
future.add_done_callback(got_future_result)
# When the GreenThread finishes, set its result on the Future:
def got_gthread_result(gthread):
if future.done():
# Can't set values any more.
return
try:
# Should return immediately:
result = gthread.wait()
future.set_result(result)
except GreenletExit:
future.cancel()
except BaseException as e:
future.set_exception(e)
self.link(got_gthread_result)
return future.__await__()
def wait(self):
""" Returns the result of the main function of this GreenThread. If the
result is a normal return value, :meth:`wait` returns it. If it raised
an exception, :meth:`wait` will raise the same exception (though the
stack trace will unavoidably contain some frames from within the
greenthread module)."""
return self._exit_event.wait()
def link(self, func, *curried_args, **curried_kwargs):
""" Set up a function to be called with the results of the GreenThread.
The function must have the following signature::
def func(gt, [curried args/kwargs]):
When the GreenThread finishes its run, it calls *func* with itself
and with the `curried arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied
at link-time. If the function wants to retrieve the result of the GreenThread,
it should call wait() on its first argument.
Note that *func* is called within execution context of
the GreenThread, so it is possible to interfere with other linked
functions by doing things like switching explicitly to another
greenthread.
"""
if self._exit_funcs is None:
self._exit_funcs = deque()
self._exit_funcs.append((func, curried_args, curried_kwargs))
if self._exit_event.ready():
self._resolve_links()
def unlink(self, func, *curried_args, **curried_kwargs):
""" remove linked function set by :meth:`link`
Remove successfully return True, otherwise False
"""
if not self._exit_funcs:
return False
try:
self._exit_funcs.remove((func, curried_args, curried_kwargs))
return True
except ValueError:
return False
def main(self, function, args, kwargs):
try:
result = function(*args, **kwargs)
except:
self._exit_event.send_exception(*sys.exc_info())
self._resolve_links()
raise
else:
self._exit_event.send(result)
self._resolve_links()
def _resolve_links(self):
# ca and ckw are the curried function arguments
if self._resolving_links:
return
if not self._exit_funcs:
return
self._resolving_links = True
try:
while self._exit_funcs:
f, ca, ckw = self._exit_funcs.popleft()
f(self, *ca, **ckw)
finally:
self._resolving_links = False
def kill(self, *throw_args):
"""Kills the greenthread using :func:`kill`. After being killed
all calls to :meth:`wait` will raise *throw_args* (which default
to :class:`greenlet.GreenletExit`)."""
return kill(self, *throw_args)
def cancel(self, *throw_args):
"""Kills the greenthread using :func:`kill`, but only if it hasn't
already started running. After being canceled,
all calls to :meth:`wait` will raise *throw_args* (which default
to :class:`greenlet.GreenletExit`)."""
return cancel(self, *throw_args)
def cancel(g, *throw_args):
"""Like :func:`kill`, but only terminates the greenthread if it hasn't
already started execution. If the grenthread has already started
execution, :func:`cancel` has no effect."""
if not g:
kill(g, *throw_args)
def kill(g, *throw_args):
"""Terminates the target greenthread by raising an exception into it.
Whatever that greenthread might be doing; be it waiting for I/O or another
primitive, it sees an exception right away.
By default, this exception is GreenletExit, but a specific exception
may be specified. *throw_args* should be the same as the arguments to
raise; either an exception instance or an exc_info tuple.
Calling :func:`kill` causes the calling greenthread to cooperatively yield.
"""
if g.dead:
return
hub = hubs.get_hub()
if not g:
# greenlet hasn't started yet and therefore throw won't work
# on its own; semantically we want it to be as though the main
# method never got called
def just_raise(*a, **kw):
if throw_args:
raise throw_args[1].with_traceback(throw_args[2])
else:
raise greenlet.GreenletExit()
g.run = just_raise
if isinstance(g, GreenThread):
# it's a GreenThread object, so we want to call its main
# method to take advantage of the notification
try:
g.main(just_raise, (), {})
except:
pass
current = getcurrent()
if current is not hub.greenlet:
# arrange to wake the caller back up immediately
hub.ensure_greenlet()
hub.schedule_call_global(0, current.switch)
g.throw(*throw_args)
|
GreenThread
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 120590,
"end": 123694
}
|
class ____(TypedDict, total=False):
type: Required[Literal['dataclass']]
cls: Required[type[Any]]
generic_origin: type[Any]
schema: Required[CoreSchema]
fields: Required[list[str]]
cls_name: str
post_init: bool # default: False
revalidate_instances: Literal['always', 'never', 'subclass-instances'] # default: 'never'
strict: bool # default: False
frozen: bool # default False
ref: str
metadata: dict[str, Any]
serialization: SerSchema
slots: bool
config: CoreConfig
def dataclass_schema(
cls: type[Any],
schema: CoreSchema,
fields: list[str],
*,
generic_origin: type[Any] | None = None,
cls_name: str | None = None,
post_init: bool | None = None,
revalidate_instances: Literal['always', 'never', 'subclass-instances'] | None = None,
strict: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
frozen: bool | None = None,
slots: bool | None = None,
config: CoreConfig | None = None,
) -> DataclassSchema:
"""
Returns a schema for a dataclass. As with `ModelSchema`, this schema can only be used as a field within
another schema, not as the root type.
Args:
cls: The dataclass type, used to perform subclass checks
schema: The schema to use for the dataclass fields
fields: Fields of the dataclass, this is used in serialization and in validation during re-validation
and while validating assignment
generic_origin: The origin type used for this dataclass, if it's a parametrized generic. Ex,
if this model schema represents `SomeDataclass[int]`, generic_origin is `SomeDataclass`
cls_name: The name to use in error locs, etc; this is useful for generics (default: `cls.__name__`)
post_init: Whether to call `__post_init__` after validation
revalidate_instances: whether instances of models and dataclasses (including subclass instances)
should re-validate defaults to config.revalidate_instances, else 'never'
strict: Whether to require an exact instance of `cls`
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
frozen: Whether the dataclass is frozen
slots: Whether `slots=True` on the dataclass, means each field is assigned independently, rather than
simply setting `__dict__`, default false
"""
return _dict_not_none(
type='dataclass',
cls=cls,
generic_origin=generic_origin,
fields=fields,
cls_name=cls_name,
schema=schema,
post_init=post_init,
revalidate_instances=revalidate_instances,
strict=strict,
ref=ref,
metadata=metadata,
serialization=serialization,
frozen=frozen,
slots=slots,
config=config,
)
|
DataclassSchema
|
python
|
huggingface__transformers
|
src/transformers/models/roberta/modeling_roberta.py
|
{
"start": 42745,
"end": 47423
}
|
class ____(RobertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
inputs_embeds=flat_inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(reshaped_logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
|
RobertaForMultipleChoice
|
python
|
PyCQA__pylint
|
tests/functional/n/no/no_member_dataclasses.py
|
{
"start": 497,
"end": 731
}
|
class ____(metaclass=ABCMeta):
type: str
@abstractmethod
def to_dict(self) -> Dict:
"""
Serializes given DeploymentState instance to Dict.
:return:
"""
@dataclass(frozen=True)
|
DeploymentState
|
python
|
pytorch__pytorch
|
torch/utils/_sympy/symbol.py
|
{
"start": 569,
"end": 3737
}
|
class ____(Enum):
SIZE = auto()
FLOAT = auto()
UNBACKED_INT = auto()
UNBACKED_FLOAT = auto()
# Inductor: The intermediates in inner_fn tmp0, one generated per ops call.
# If one of these shows up in an indexing expression, that means an
# indirect load is happening.
TMP = auto()
# Inductor: Placeholder variable that is later replaced with TMP
INDIRECT = auto()
# Inductor: Some size expressions are replaced with a precomputed size ps0
# which is computed host side, and then directly reused in the kernel, so
# we don't repeatedly recompute it on device.
PRECOMPUTED_SIZE = auto()
# Inductor: An indexing variable i0 in loops IR which ranges over non-reduced
# dim in the loop
INDEX = auto()
# Inductor: A reduction indexing (r0, r1) variables in loops IR which ranges over
# reduced dim(s) in the loop
R0_INDEX = auto()
R1_INDEX = auto()
# Inductor: In templated kernels torch._inductor.kernel, we have a hook to
# store the final output and append epilogue fusions. To do this, we must
# know what the indexes the outputs range over. NB: These will also
# advertise as INDEX, this is... probably OK?
TEMPLATE_INDEX = auto()
# Inductor: iteration domain for blockIdx.x/blockIdx.y
XBLOCK = auto()
YBLOCK = auto()
ZBLOCK = auto()
# Inductor: this is used solely for dynamic_reshape_indexer
VIEW = auto()
# Alternate (non-modular) indexing used in halide kernels
HALIDE = auto()
# Invariant: there must not be a prefix which is a prefix of another string,
# as this introduces ambiguity
prefix_str = {
SymT.SIZE: "s", # integer
SymT.UNBACKED_INT: "u", # integer
# Prefix z here is chosen to avoid false aliasing in symbol_is_type test
# DO NOT add a "z" type. You also need to avoid conflicts on these
# prefixes but this is somewhat easier to manage
SymT.FLOAT: "zf",
SymT.UNBACKED_FLOAT: "zuf",
SymT.TMP: "tmp",
SymT.PRECOMPUTED_SIZE: "ps",
SymT.INDEX: "i",
SymT.R0_INDEX: "r0_",
SymT.R1_INDEX: "r1_",
SymT.TEMPLATE_INDEX: "idx",
SymT.XBLOCK: "x",
SymT.YBLOCK: "y",
SymT.ZBLOCK: "z",
SymT.INDIRECT: "indirect", # false aliasing?
SymT.VIEW: "view",
SymT.HALIDE: "h",
}
def make_symbol(prefix: SymT, idx: int, **kwargs) -> sympy.Symbol:
# TODO: maybe put the assumptions here directly
return sympy.Symbol(f"{prefix_str[prefix]}{idx}", **kwargs)
# This type is a little wider than it should be, because free_symbols says
# that it contains Basic, rather than Symbol
def symbol_is_type(sym: sympy.Basic, prefix: SymT | Iterable[SymT]) -> bool:
if not isinstance(sym, sympy.Symbol):
raise AssertionError("expected sympy.Symbol")
name_str = sym.name.lower() # Match capitalized names like XBLOCK, RBLOCK
if isinstance(prefix, SymT):
return name_str.startswith(prefix_str[prefix])
else:
return name_str.startswith(tuple(prefix_str[p] for p in prefix))
def free_symbol_is_type(e: sympy.Expr, prefix: SymT | Iterable[SymT]) -> bool:
return any(symbol_is_type(v, prefix) for v in e.free_symbols)
|
SymT
|
python
|
bokeh__bokeh
|
src/bokeh/document/json.py
|
{
"start": 2156,
"end": 2307
}
|
class ____(TypedDict):
kind: Literal["ColumnDataChanged"]
model: Ref
attr: str
data: DataDict
cols: list[str] | None
|
ColumnDataChanged
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/rds.py
|
{
"start": 17495,
"end": 19721
}
|
class ____(RdsBaseOperator):
"""
Cancels an export task in progress that is exporting a snapshot to Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCancelExportTaskOperator`
:param export_task_identifier: The identifier of the snapshot export task to cancel
:param wait_for_completion: If True, waits for DB snapshot export to cancel. (default: True)
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields = aws_template_fields(
"export_task_identifier",
)
def __init__(
self,
*,
export_task_identifier: str,
wait_for_completion: bool = True,
check_interval: int = 30,
max_attempts: int = 40,
**kwargs,
):
super().__init__(**kwargs)
self.export_task_identifier = export_task_identifier
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.max_attempts = max_attempts
def execute(self, context: Context) -> str:
self.log.info("Canceling export task %s", self.export_task_identifier)
cancel_export = self.hook.conn.cancel_export_task(
ExportTaskIdentifier=self.export_task_identifier,
)
if self.wait_for_completion:
self.hook.wait_for_export_task_state(
self.export_task_identifier,
target_state="canceled",
check_interval=self.check_interval,
max_attempts=self.max_attempts,
)
return json.dumps(cancel_export, default=str)
|
RdsCancelExportTaskOperator
|
python
|
has2k1__plotnine
|
plotnine/themes/elements/element_rect.py
|
{
"start": 229,
"end": 1767
}
|
class ____(element_base):
"""
Theme element: Rectangle
Used for backgrounds and borders
Parameters
----------
fill : str | tuple
Rectangle background color
color : str | tuple
Line color
colour : str | tuple
Alias of color
size : float
Line thickness
kwargs : dict
Parameters recognised by
[](`~matplotlib.patches.Rectangle`).
In some cases you can use the fancy parameters from
[](`~matplotlib.patches.FancyBboxPatch`).
"""
def __init__(
self,
fill: Optional[
str
| tuple[float, float, float]
| tuple[float, float, float, float]
] = None,
color: Optional[
str
| tuple[float, float, float]
| tuple[float, float, float, float]
] = None,
size: Optional[float] = None,
linetype: Optional[str | Sequence[int]] = None,
colour: Optional[
str
| tuple[float, float, float]
| tuple[float, float, float, float]
] = None,
**kwargs: Any,
):
super().__init__()
self.properties.update(**kwargs)
color = color if color else colour
if fill:
self.properties["facecolor"] = fill
if color:
self.properties["edgecolor"] = color
if size is not None:
self.properties["linewidth"] = size
if linetype:
self.properties["linestyle"] = linetype
|
element_rect
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pep8_naming/N802.py
|
{
"start": 716,
"end": 852
}
|
class ____(NodeTransformer):
def visit_Constant(self, node):
pass
from http.server import BaseHTTPRequestHandler
|
Transformer
|
python
|
yandexdataschool__Practical_RL
|
week04_approx_rl/test_td_loss/compute_td_loss.py
|
{
"start": 481,
"end": 6202
}
|
class ____(nn.Module):
"""
An nn.Module, which outputs a value which does not depend on its input.
Designed to be used for testing the compute_td_loss function.
"""
def __init__(self, output_q_values: torch.Tensor):
super().__init__()
assert output_q_values.dtype == torch.float, output_q_values.dtype
assert output_q_values.ndim == 2, output_q_values.shape
self.output_q_values = nn.Parameter(output_q_values)
def forward(self, state):
return torch.clone(self.output_q_values)
@torch.no_grad()
def test_is_done_is_used(compute_td_loss: ComputeTdLossProtocol):
"""
Tries to catch the error when compute_td_loss ignores its is_done argument.
"""
states = torch.empty(1)
actions = torch.tensor([0])
rewards = torch.tensor([1], dtype=torch.float)
is_done_first = torch.tensor([True])
is_done_second = torch.tensor([False])
next_states = torch.empty(1)
gamma = 0.99
q_values_agent = torch.tensor([[1, 1, 1]], dtype=torch.float)
q_values_target_network = torch.tensor([[1, 1, 1]], dtype=torch.float)
agent = MockAgent(q_values_agent)
target_network = MockAgent(q_values_target_network)
loss_kwargs = dict(
states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
agent=agent,
target_network=target_network,
gamma=gamma,
)
loss_first = compute_td_loss(is_done=is_done_first, **loss_kwargs).item()
loss_second = compute_td_loss(is_done=is_done_second, **loss_kwargs).item()
abs_diff = abs(loss_first - loss_second)
if abs_diff > 0.5:
msg = "compute_td_loss returned close values for different is_done inputs"
assert abs(loss_first - loss_second) > 0.5, msg
@torch.no_grad()
def test_compute_td_loss_vanilla(compute_td_loss: ComputeTdLossProtocol):
"""
Checks compute_td_loss on manually precomputed examples.
Note: this is a test for vanilla compute_td_loss
and it should NOT be used for double_dqn
"""
samples = [
{
"q_agent": [0, 1, 2],
"action": 1,
"is_done": False,
"q_target": [0, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 25,
},
{
"q_agent": [0, 1, 2],
"action": 1,
"is_done": False,
"q_target": [2, 0, 1],
"gamma": 0.5,
"reward": 5,
"answer": 25,
},
{
"q_agent": [3, 1, 2],
"action": 1,
"is_done": True,
"q_target": [0, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 16,
},
{
"q_agent": [0, 1, 2],
"action": 0,
"is_done": False,
"q_target": [0, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 36,
},
]
for sample in samples:
agent = MockAgent(torch.tensor(sample["q_agent"], dtype=torch.float)[None])
tn = MockAgent(torch.tensor(sample["q_target"], dtype=torch.float)[None])
ans = compute_td_loss(
states=torch.empty(1),
actions=torch.tensor(sample["action"])[None],
rewards=torch.tensor(sample["reward"])[None],
next_states=torch.empty(1),
is_done=torch.tensor(sample["is_done"])[None],
agent=agent,
target_network=tn,
gamma=sample["gamma"],
).item()
abs_diff = abs(ans - sample["answer"])
assert abs_diff < 1e-8, abs_diff
@torch.no_grad()
def test_compute_td_loss_double(compute_td_loss: ComputeTdLossProtocol):
"""
Checks compute_td_loss on manually precomputed examples.
Note: this is a test for vanilla compute_td_loss
and it should NOT be used for double_dqn
"""
samples = [
{
"q_agent": [0, 1, 2],
"action": 1,
"is_done": False,
"q_target": [0, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 25,
},
{
"q_agent": [0, 1, 2],
"action": 1,
"is_done": False,
"q_target": [2, 0, 1],
"gamma": 0.5,
"reward": 5,
"answer": 20.25,
},
{
"q_agent": [3, 1, 2],
"action": 1,
"is_done": False,
"q_target": [-1, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 12.25,
},
{
"q_agent": [3, 1, 2],
"action": 1,
"is_done": True,
"q_target": [-1, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 16,
},
{
"q_agent": [0, 1, 2],
"action": 0,
"is_done": False,
"q_target": [0, 1, 2],
"gamma": 0.5,
"reward": 5,
"answer": 36,
},
]
for sample in samples:
agent = MockAgent(torch.tensor(sample["q_agent"], dtype=torch.float)[None])
tn = MockAgent(torch.tensor(sample["q_target"], dtype=torch.float)[None])
ans = compute_td_loss(
states=torch.empty(1),
actions=torch.tensor(sample["action"])[None],
rewards=torch.tensor(sample["reward"])[None],
next_states=torch.empty(1),
is_done=torch.tensor(sample["is_done"])[None],
agent=agent,
target_network=tn,
gamma=sample["gamma"],
).item()
abs_diff = abs(ans - sample["answer"])
assert abs_diff < 1e-8, abs_diff
|
MockAgent
|
python
|
pypa__pip
|
src/pip/_internal/index/collector.py
|
{
"start": 8639,
"end": 12546
}
|
class ____(HTMLParser):
"""
HTMLParser that keeps the first base HREF and a list of all anchor
elements' attributes.
"""
def __init__(self, url: str) -> None:
super().__init__(convert_charrefs=True)
self.url: str = url
self.base_url: str | None = None
self.anchors: list[dict[str, str | None]] = []
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
if tag == "base" and self.base_url is None:
href = self.get_href(attrs)
if href is not None:
self.base_url = href
elif tag == "a":
self.anchors.append(dict(attrs))
def get_href(self, attrs: list[tuple[str, str | None]]) -> str | None:
for name, value in attrs:
if name == "href":
return value
return None
def _handle_get_simple_fail(
link: Link,
reason: str | Exception,
meth: Callable[..., None] | None = None,
) -> None:
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_index_content(
response: Response, cache_link_parsing: bool = True
) -> IndexContent:
encoding = _get_encoding_from_headers(response.headers)
return IndexContent(
response.content,
response.headers["Content-Type"],
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing,
)
def _get_index_content(link: Link, *, session: PipSession) -> IndexContent | None:
url = link.url.split("#", 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning(
"Cannot look at %s URL %s because it does not support lookup as web pages.",
vcs_scheme,
link,
)
return None
# Tack index.html onto file:// URLs that point to directories
if url.startswith("file:") and os.path.isdir(url_to_path(url)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith("/"):
url += "/"
# TODO: In the future, it would be nice if pip supported PEP 691
# style responses in the file:// URLs, however there's no
# standard file extension for application/vnd.pypi.simple.v1+json
# so we'll need to come up with something on our own.
url = urllib.parse.urljoin(url, "index.html")
logger.debug(" file: URL is directory, getting %s", url)
try:
resp = _get_simple_response(url, session=session)
except _NotHTTP:
logger.warning(
"Skipping page %s because it looks like an archive, and cannot "
"be checked by a HTTP HEAD request.",
link,
)
except _NotAPIContent as exc:
logger.warning(
"Skipping page %s because the %s request got Content-Type: %s. "
"The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
"application/vnd.pypi.simple.v1+html, and text/html",
link,
exc.request_desc,
exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_simple_fail(link, exc)
except RetryError as exc:
_handle_get_simple_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_simple_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_simple_fail(link, f"connection error: {exc}")
except requests.Timeout:
_handle_get_simple_fail(link, "timed out")
else:
return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
return None
|
HTMLLinkParser
|
python
|
apache__airflow
|
providers/openlineage/tests/unit/openlineage/utils/test_utils.py
|
{
"start": 2996,
"end": 3050
}
|
class ____(BashOperator):
pass
|
CustomOperatorForTest
|
python
|
pytorch__pytorch
|
test/distributed/_composable/fsdp/test_fully_shard_init.py
|
{
"start": 36644,
"end": 42796
}
|
class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 4
@skip_if_lt_x_gpu(1)
def test_1d_process_group_init(self):
assert self.world_size == 4, f"{self.world_size}"
# For convenience, use device mesh's infra to construct the DP PG
# (in practice, the trainer would do it manually via `new_group()`)
dp_size = 2
global_mesh = init_device_mesh(
device_type.type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
ref_dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
dp_pg = ref_dp_mesh.get_group(0)
# Check the `from_group()` API for correctness
dp_mesh = DeviceMesh.from_group(dp_pg, device_type.type, mesh_dim_names=("dp",))
# Only compare the mesh tensors, not `DeviceMesh` objects themselves,
# since the ref has a parent mesh, while the `from_group` one does not
self.assertEqual(dp_mesh.mesh, ref_dp_mesh.mesh)
self.assertEqual(dp_mesh._coordinate_on_dim, ref_dp_mesh._coordinate_on_dim)
self.assertEqual(dp_mesh._dim_group_names, ref_dp_mesh._dim_group_names)
# Check 1D FSDP forward/backward parity over the DP mesh
# NOTE: We cannot use 2D DTensor-based training here because the DP
# mesh from `from_group` does not respect the parent mesh.
torch.manual_seed(42)
mlp_dim = 8
ref_model = MLP(mlp_dim)
for param in ref_model.parameters():
dist.broadcast(param.detach(), src=0)
model = copy.deepcopy(ref_model)
# Parallelize the test model with the ref DP mesh
for module in (ref_model.in_proj, ref_model.out_proj, ref_model):
fully_shard(module, mesh=ref_dp_mesh)
# Parallelize the test model with the new DP mesh from the PG
for module in (model.in_proj, model.out_proj, model):
fully_shard(module, mesh=dp_mesh)
# Ensure that TP ranks have the same input
inp = torch.randn((4, mlp_dim), device=device_type.type)
if self.rank in (0, 1):
dist.broadcast(inp, src=0, group=tp_mesh.get_group(0))
elif self.rank in (2, 3):
dist.broadcast(inp, src=2, group=tp_mesh.get_group(0))
ref_loss = ref_model(inp).sum()
ref_loss.backward()
loss = model(inp).sum()
loss.backward()
self.assertEqual(loss, ref_loss)
for param, ref_param in zip(model.parameters(), ref_model.parameters()):
# Cannot compare `DTensor`s directly since their meshes are not
# equal due to the ref parameter's mesh having a parent mesh while
# the other's mesh does not
self.assertEqual(param.to_local(), ref_param.to_local())
self.assertEqual(param.device_mesh.mesh, ref_param.device_mesh.mesh)
self.assertEqual(param.grad.to_local(), ref_param.grad.to_local())
self.assertEqual(
param.grad.device_mesh.mesh, ref_param.grad.device_mesh.mesh
)
@skip_if_lt_x_gpu(1)
def test_2d_process_group_init(self):
shard_mesh_dim_size = 2
assert self.world_size % shard_mesh_dim_size == 0, (
f"Expects {self.world_size} to be divisible by {shard_mesh_dim_size}"
)
replicate_mesh_dim_size = self.world_size // shard_mesh_dim_size
mesh_dim_names = ("replicate", "shard")
ref_mesh = init_device_mesh(
device_type.type,
(replicate_mesh_dim_size, shard_mesh_dim_size),
mesh_dim_names=mesh_dim_names,
)
# Use the global PG as the parent group (in practice, this could be a
# subgroup of the global PG)
dp_group = dist.distributed_c10d._get_default_group()
dp_shard_group = _init_intra_node_process_group(shard_mesh_dim_size)
dp_replicate_group = _init_inter_node_process_group(
dp_group, replicate_mesh_dim_size
)
mesh_tensor = torch.tensor(
dist.get_process_group_ranks(dp_group), dtype=torch.int
).view(replicate_mesh_dim_size, shard_mesh_dim_size)
# Check the `from_group()` API for correctness
mesh = DeviceMesh.from_group(
[dp_replicate_group, dp_shard_group],
device_type.type,
mesh_dim_names=mesh_dim_names,
mesh=mesh_tensor,
)
self.assertEqual(mesh.mesh, ref_mesh.mesh)
self.assertEqual(mesh._coordinate_on_dim, ref_mesh._coordinate_on_dim)
for mesh_dim_name in mesh_dim_names:
child_mesh = mesh[mesh_dim_name]
ref_child_mesh = ref_mesh[mesh_dim_name]
self.assertEqual(child_mesh, ref_child_mesh)
child_ranks = dist.distributed_c10d.get_process_group_ranks(
child_mesh.get_group()
)
ref_child_ranks = dist.distributed_c10d.get_process_group_ranks(
ref_child_mesh.get_group()
)
self.assertEqual(child_ranks, ref_child_ranks)
# Check HSDP forward/backward parity
torch.manual_seed(42)
mlp_dim = 8
ref_model = MLP(mlp_dim)
for param in ref_model.parameters():
dist.broadcast(param.detach(), src=0)
model = copy.deepcopy(ref_model)
# Parallelize the test model with the ref mesh
for module in (ref_model.in_proj, ref_model.out_proj, ref_model):
fully_shard(module, mesh=ref_mesh)
# Parallelize the test model with the new mesh from the PG
for module in (model.in_proj, model.out_proj, model):
fully_shard(module, mesh=mesh)
inp = torch.randn((4, mlp_dim), device=device_type.type)
ref_loss = ref_model(inp).sum()
ref_loss.backward()
loss = model(inp).sum()
loss.backward()
self.assertEqual(loss, ref_loss)
for param, ref_param in zip(model.parameters(), ref_model.parameters()):
self.assertEqual(param, ref_param)
self.assertEqual(param.grad, ref_param.grad)
|
TestFullyShardProcessGroupInit
|
python
|
catalyst-team__catalyst
|
tests/pipelines/test_mnist_multicriterion.py
|
{
"start": 728,
"end": 8951
}
|
class ____(dl.Runner):
def predict_batch(self, batch):
# model inference step
return self.model(batch[0].to(self.device))
def on_loader_start(self, runner):
super().on_loader_start(runner)
self.meters = {
key: metrics.AdditiveMetric(compute_on_call=False)
for key in ["loss", "accuracy01", "accuracy03"]
}
def handle_batch(self, batch):
# model train/valid step
# unpack the batch
x, y = batch
# run model forward pass
logits = self.model(x)
# <--- multi-criterion usage --->
# compute the loss
loss_multiclass = self.criterion["multiclass"](logits, y)
loss_multilabel = self.criterion["multilabel"](
logits, F.one_hot(y, 10).to(torch.float32)
)
loss = loss_multiclass + loss_multilabel
# <--- multi-criterion usage --->
# compute other metrics of interest
accuracy01, accuracy03 = metrics.accuracy(logits, y, topk=(1, 3))
# log metrics
self.batch_metrics.update(
{"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
)
for key in ["loss", "accuracy01", "accuracy03"]:
self.meters[key].update(self.batch_metrics[key].item(), self.batch_size)
# run model backward pass
if self.is_train_loader:
self.engine.backward(loss)
self.optimizer.step()
self.optimizer.zero_grad()
def on_loader_end(self, runner):
for key in ["loss", "accuracy01", "accuracy03"]:
self.loader_metrics[key] = self.meters[key].compute()[0]
super().on_loader_end(runner)
def train_experiment(engine=None):
with TemporaryDirectory() as logdir:
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
optimizer = optim.Adam(model.parameters(), lr=0.02)
# <--- multi-criterion setup --->
criterion = {
"multiclass": nn.CrossEntropyLoss(),
"multilabel": nn.BCEWithLogitsLoss(),
}
# <--- multi-criterion setup --->
loaders = {
"train": DataLoader(
MNIST(DATA_ROOT, train=True),
batch_size=32,
),
"valid": DataLoader(
MNIST(DATA_ROOT, train=False),
batch_size=32,
),
}
runner = CustomRunner()
# model training
runner.train(
engine=engine,
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
logdir=logdir,
num_epochs=1,
verbose=False,
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
)
def train_experiment_from_configs(*auxiliary_configs: str):
run_experiment_from_configs(
Path(__file__).parent / "configs",
f"{Path(__file__).stem}.yml",
*auxiliary_configs,
)
# Device
@mark.skipif(not IS_CPU_REQUIRED, reason="CUDA device is not available")
def test_run_on_cpu():
train_experiment(dl.CPUEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED or not IS_CPU_REQUIRED, reason="CPU device is not available"
)
def test_config_run_on_cpu():
train_experiment_from_configs("engine_cpu.yml")
@mark.skipif(
not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]), reason="CUDA device is not available"
)
def test_run_on_torch_cuda0():
train_experiment(dl.GPUEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED or not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]),
reason="CUDA device is not available",
)
def test_config_run_on_torch_cuda0():
train_experiment_from_configs("engine_gpu.yml")
@mark.skipif(
not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_run_on_amp():
train_experiment(dl.GPUEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_config_run_on_amp():
train_experiment_from_configs("engine_gpu_amp.yml")
# DP
@mark.skipif(
not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_run_on_torch_dp():
train_experiment(dl.DataParallelEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_config_run_on_torch_dp():
train_experiment_from_configs("engine_dp.yml")
@mark.skipif(
not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_run_on_amp_dp():
train_experiment(dl.DataParallelEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_dp():
train_experiment_from_configs("engine_dp_amp.yml")
# DDP
# @mark.skipif(
# not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
# reason="No CUDA>=2 found",
# )
# def test_run_on_torch_ddp():
# train_experiment(dl.DistributedDataParallelEngine())
# @mark.skipif(
# not IS_CONFIGS_REQUIRED
# or not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
# reason="No CUDA>=2 found",
# )
# def test_config_run_on_torch_ddp():
# train_experiment_from_configs("engine_ddp.yml")
# @mark.skipif(
# not all(
# [
# IS_DDP_AMP_REQUIRED,
# IS_CUDA_AVAILABLE,
# NUM_CUDA_DEVICES >= 2,
# SETTINGS.amp_required,
# ]
# ),
# reason="No CUDA>=2 or AMP found",
# )
# def test_run_on_amp_ddp():
# train_experiment(dl.DistributedDataParallelEngine(fp16=True))
# @mark.skipif(
# not IS_CONFIGS_REQUIRED
# or not all(
# [
# IS_DDP_AMP_REQUIRED,
# IS_CUDA_AVAILABLE,
# NUM_CUDA_DEVICES >= 2,
# SETTINGS.amp_required,
# ]
# ),
# reason="No CUDA>=2 or AMP found",
# )
# def test_config_run_on_amp_ddp():
# train_experiment_from_configs("engine_ddp_amp.yml")
# def _train_fn(local_rank, world_size):
# process_group_kwargs = {
# "backend": "nccl",
# "world_size": world_size,
# }
# os.environ["WORLD_SIZE"] = str(world_size)
# os.environ["RANK"] = str(local_rank)
# os.environ["LOCAL_RANK"] = str(local_rank)
# dist.init_process_group(**process_group_kwargs)
# train_experiment(dl.Engine())
# dist.destroy_process_group()
# @mark.skipif(
# not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
# reason="No CUDA>=2 found",
# )
# def test_run_on_torch_ddp_spawn():
# world_size: int = torch.cuda.device_count()
# mp.spawn(
# _train_fn,
# args=(world_size,),
# nprocs=world_size,
# join=True,
# )
# def _train_fn_amp(local_rank, world_size):
# process_group_kwargs = {
# "backend": "nccl",
# "world_size": world_size,
# }
# os.environ["WORLD_SIZE"] = str(world_size)
# os.environ["RANK"] = str(local_rank)
# os.environ["LOCAL_RANK"] = str(local_rank)
# dist.init_process_group(**process_group_kwargs)
# train_experiment(dl.Engine(fp16=True))
# dist.destroy_process_group()
# @mark.skipif(
# not all(
# [
# IS_DDP_AMP_REQUIRED,
# IS_CUDA_AVAILABLE,
# NUM_CUDA_DEVICES >= 2,
# SETTINGS.amp_required,
# ]
# ),
# reason="No CUDA>=2 or AMP found",
# )
# def test_run_on_torch_ddp_amp_spawn():
# world_size: int = torch.cuda.device_count()
# mp.spawn(
# _train_fn_amp,
# args=(world_size,),
# nprocs=world_size,
# join=True,
# )
# dist.destroy_process_group()
|
CustomRunner
|
python
|
realpython__materials
|
python-operator-module/attrgetter_sorting.py
|
{
"start": 63,
"end": 699
}
|
class ____:
id: int
fname: str
lname: str
group: str
musician_lists = [
[1, "Brian", "Wilson", "Beach Boys"],
[2, "Carl", "Wilson", "Beach Boys"],
[3, "Dennis", "Wilson", "Beach Boys"],
[4, "Bruce", "Johnston", "Beach Boys"],
[5, "Hank", "Marvin", "Shadows"],
[6, "Bruce", "Welch", "Shadows"],
[7, "Brian", "Bennett", "Shadows"],
]
group_members = []
for musician in musician_lists:
group_members.append(Musician(*musician))
# Sorting on a single attribute.
get_id = operator.attrgetter("id")
for musician in sorted(group_members, key=get_id, reverse=True):
print(musician)
|
Musician
|
python
|
scipy__scipy
|
benchmarks/benchmarks/integrate.py
|
{
"start": 790,
"end": 2235
}
|
class ____(Benchmark):
TOL = 1e-5
def fun_flow(self, x, y, p):
A = p[0]
return np.vstack((
y[1], y[2], 100 * (y[1] ** 2 - y[0] * y[2] - A),
y[4], -100 * y[0] * y[4] - 1, y[6], -70 * y[0] * y[6]
))
def bc_flow(self, ya, yb, p):
return np.array([
ya[0], ya[1], yb[0] - 1, yb[1], ya[3], yb[3], ya[5], yb[5] - 1])
def time_flow(self):
x = np.linspace(0, 1, 10)
y = np.ones((7, x.size))
solve_bvp(self.fun_flow, self.bc_flow, x, y, p=[1], tol=self.TOL)
def fun_peak(self, x, y):
eps = 1e-3
return np.vstack((
y[1],
-(4 * x * y[1] + 2 * y[0]) / (eps + x**2)
))
def bc_peak(self, ya, yb):
eps = 1e-3
v = (1 + eps) ** -1
return np.array([ya[0] - v, yb[0] - v])
def time_peak(self):
x = np.linspace(-1, 1, 5)
y = np.zeros((2, x.size))
solve_bvp(self.fun_peak, self.bc_peak, x, y, tol=self.TOL)
def fun_gas(self, x, y):
alpha = 0.8
return np.vstack((
y[1],
-2 * x * y[1] * (1 - alpha * y[0]) ** -0.5
))
def bc_gas(self, ya, yb):
return np.array([ya[0] - 1, yb[0]])
def time_gas(self):
x = np.linspace(0, 3, 5)
y = np.empty((2, x.size))
y[0] = 0.5
y[1] = -0.5
solve_bvp(self.fun_gas, self.bc_gas, x, y, tol=self.TOL)
|
SolveBVP
|
python
|
astropy__astropy
|
astropy/modeling/tests/test_parameters.py
|
{
"start": 4730,
"end": 5480
}
|
class ____(M2):
m3d = Parameter(default=20.0)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.0
assert mod.m1b == 5.0
assert mod.m2c == 11.0
assert mod.m3d == 20.0
for key in ["m1a", "m1b", "m2c", "m3d"]:
assert key in mod.__dict__
assert mod.param_names == ("m1a", "m1b", "m2c", "m3d")
def test_param_metric():
mod = M3()
assert mod._param_metrics["m1a"]["slice"] == slice(0, 1)
assert mod._param_metrics["m1b"]["slice"] == slice(1, 2)
assert mod._param_metrics["m2c"]["slice"] == slice(2, 3)
assert mod._param_metrics["m3d"]["slice"] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1.0, 5.0, 11.0, 20], dtype=np.float64)).all()
|
M3
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/components_tests/component_tree_tests/test_component_tree_reloading.py
|
{
"start": 239,
"end": 658
}
|
class ____(dg.Component):
"""Forthright and by the book."""
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
# this should only ever be called once
sentinel_path = context.component_path.file_path / "sentinel.txt"
assert not sentinel_path.exists()
sentinel_path.touch()
return dg.Definitions(assets=[dg.AssetSpec("singleton")])
|
SingletonComponent
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-kibela/llama_index/readers/kibela/base.py
|
{
"start": 373,
"end": 494
}
|
class ____(BaseModel):
startCursor: Optional[str]
endCursor: Optional[str]
hasNextPage: Optional[bool]
|
PageInfo
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/templates.py
|
{
"start": 10942,
"end": 11556
}
|
class ____(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Velocity'
aliases = ['xml+velocity']
alias_filenames = ['*.xml', '*.vm']
mimetypes = ['application/xml+velocity']
def __init__(self, **options):
super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer,
**options)
def analyse_text(text):
rv = VelocityLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
|
VelocityXmlLexer
|
python
|
falconry__falcon
|
tests/test_headers.py
|
{
"start": 3364,
"end": 3679
}
|
class ____:
URL1 = '/\u00e7runchy/bacon'
URL2 = 'ab\u00e7'
def on_get(self, req, resp):
resp.location = self.URL1
resp.content_location = self.URL2
def on_head(self, req, resp):
resp.location = self.URL2
resp.content_location = self.URL1
|
LocationHeaderUnicodeResource
|
python
|
huggingface__transformers
|
tests/models/ctrl/test_modeling_ctrl.py
|
{
"start": 1180,
"end": 6335
}
|
class ____:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return (
config,
input_ids,
input_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(self):
return CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
dff=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_ctrl_model(self, config, input_ids, input_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids)
model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids}
return config, inputs_dict
@require_torch
|
CTRLModelTester
|
python
|
vyperlang__vyper
|
vyper/cli/compile_archive.py
|
{
"start": 453,
"end": 2836
}
|
class ____(Exception):
pass
def compile_from_zip(file_name, output_formats, settings, no_bytecode_metadata):
compiler_data = compiler_data_from_zip(file_name, settings, no_bytecode_metadata)
return outputs_from_compiler_data(compiler_data, output_formats)
def compiler_data_from_zip(file_name, settings, no_bytecode_metadata):
with open(file_name, "rb") as f:
bcontents = f.read()
try:
buf = io.BytesIO(bcontents)
archive = zipfile.ZipFile(buf, mode="r")
except zipfile.BadZipFile as e1:
try:
# `validate=False` - tools like base64 can generate newlines
# for readability. validate=False does the "correct" thing and
# simply ignores these
bcontents = base64.b64decode(bcontents, validate=False)
buf = io.BytesIO(bcontents)
archive = zipfile.ZipFile(buf, mode="r")
except (zipfile.BadZipFile, binascii.Error):
raise NotZipInput() from e1
fcontents = archive.read("MANIFEST/compilation_targets").decode("utf-8")
compilation_targets = fcontents.splitlines()
if len(compilation_targets) != 1:
raise BadArchive("Multiple compilation targets not supported!")
input_bundle = ZipInputBundle(archive)
storage_layout_path = "MANIFEST/storage_layout.json"
storage_layout = None
if storage_layout_path in archive.namelist():
storage_layout_map = json.loads(archive.read(storage_layout_path).decode("utf-8"))
storage_layout = input_bundle.load_json_file(storage_layout_map[compilation_targets[0]])
mainpath = PurePath(compilation_targets[0])
file = input_bundle.load_file(mainpath)
assert isinstance(file, FileInput) # mypy hint
settings = settings or Settings()
archive_settings_txt = archive.read("MANIFEST/settings.json").decode("utf-8")
archive_settings = Settings.from_dict(json.loads(archive_settings_txt))
integrity = archive.read("MANIFEST/integrity").decode("utf-8").strip()
settings = merge_settings(
settings, archive_settings, lhs_source="command line", rhs_source="archive settings"
)
return CompilerData(
file,
input_bundle=input_bundle,
storage_layout=storage_layout,
integrity_sum=integrity,
settings=settings,
no_bytecode_metadata=no_bytecode_metadata,
)
|
NotZipInput
|
python
|
django__django
|
django/db/models/functions/text.py
|
{
"start": 4834,
"end": 5769
}
|
class ____(Func):
function = "LEFT"
arity = 2
output_field = CharField()
def __init__(self, expression, length, **extra):
"""
expression: the name of a field, or an expression returning a string
length: the number of characters to return from the start of the string
"""
if not hasattr(length, "resolve_expression"):
if length < 1:
raise ValueError("'length' must be greater than 0.")
super().__init__(expression, length, **extra)
def get_substr(self):
return Substr(self.source_expressions[0], Value(1), self.source_expressions[1])
def as_oracle(self, compiler, connection, **extra_context):
return self.get_substr().as_oracle(compiler, connection, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return self.get_substr().as_sqlite(compiler, connection, **extra_context)
|
Left
|
python
|
huggingface__transformers
|
src/transformers/models/mllama/modeling_mllama.py
|
{
"start": 15619,
"end": 21440
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config: Optional[MllamaTextConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.config = config
self.num_heads = self.config.num_attention_heads
self.num_key_value_heads = self.config.num_key_value_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.head_dim = config.hidden_size // self.num_heads
self.layer_idx = layer_idx
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.q_norm = MllamaTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = MllamaTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
cross_attention_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = self.q_norm(query_states)
if cross_attention_states is not None:
key_states = self.k_proj(cross_attention_states)
value_states = self.v_proj(cross_attention_states)
key_states = key_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2)
key_states = self.k_norm(key_states)
if past_key_values is not None:
# if we have a new image + new tokens, we only computed key_states on that new image
# we still update the cross key states, past_image, new_image. And use it!
key_states, value_states = past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
elif cache_position[0] != 0:
key_states, value_states = (
past_key_values.layers[self.layer_idx].keys,
past_key_values.layers[self.layer_idx].values,
)
else:
raise ValueError(
"Cross attention layer can't find neither `cross_attn_states` nor cached values for key/values!"
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
|
MllamaTextCrossAttention
|
python
|
ray-project__ray
|
python/ray/tests/test_concurrency_group.py
|
{
"start": 5957,
"end": 6325
}
|
class ____:
def __init__(self):
self._thread_local_data = threading.local()
def set_thread_local(self, value: Any) -> int:
self._thread_local_data.value = value
return threading.current_thread().ident
def get_thread_local(self) -> Tuple[Any, int]:
return self._thread_local_data.value, threading.current_thread().ident
|
Actor
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_vendor/resolvelib/providers.py
|
{
"start": 5049,
"end": 5871
}
|
class ____(object):
"""The thing that performs the actual resolution work."""
base_exception = Exception
def __init__(self, provider, reporter):
self.provider = provider
self.reporter = reporter
def resolve(self, requirements, **kwargs):
"""Take a collection of constraints, spit out the resolution result.
This returns a representation of the final resolution state, with one
guarenteed attribute ``mapping`` that contains resolved candidates as
values. The keys are their respective identifiers.
:param requirements: A collection of constraints.
:param kwargs: Additional keyword arguments that subclasses may accept.
:raises: ``self.base_exception`` or its subclass.
"""
raise NotImplementedError
|
AbstractResolver
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/suite/test_reflection.py
|
{
"start": 113226,
"end": 117740
}
|
class ____(fixtures.TablesTest):
run_inserts = run_deletes = None
__sparse_driver_backend__ = True
__requires__ = ("identity_columns", "table_reflection")
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column("normal", Integer),
Column("id1", Integer, Identity()),
)
Table(
"t2",
metadata,
Column(
"id2",
Integer,
Identity(
always=True,
start=2,
increment=3,
minvalue=-2,
maxvalue=42,
cycle=True,
cache=4,
),
),
)
if testing.requires.schemas.enabled:
Table(
"t1",
metadata,
Column("normal", Integer),
Column("id1", Integer, Identity(always=True, start=20)),
schema=config.test_schema,
)
def check(self, value, exp, approx):
if testing.requires.identity_columns_standard.enabled:
common_keys = (
"always",
"start",
"increment",
"minvalue",
"maxvalue",
"cycle",
"cache",
)
for k in list(value):
if k not in common_keys:
value.pop(k)
if approx:
eq_(len(value), len(exp))
for k in value:
if k == "minvalue":
is_true(value[k] <= exp[k])
elif k in {"maxvalue", "cache"}:
is_true(value[k] >= exp[k])
else:
eq_(value[k], exp[k], k)
else:
eq_(value, exp)
else:
eq_(value["start"], exp["start"])
eq_(value["increment"], exp["increment"])
def test_reflect_identity(self):
insp = inspect(config.db)
cols = insp.get_columns("t1") + insp.get_columns("t2")
for col in cols:
if col["name"] == "normal":
is_false("identity" in col)
elif col["name"] == "id1":
if "autoincrement" in col:
is_true(col["autoincrement"])
eq_(col["default"], None)
is_true("identity" in col)
self.check(
col["identity"],
dict(
always=False,
start=1,
increment=1,
minvalue=1,
maxvalue=2147483647,
cycle=False,
cache=1,
),
approx=True,
)
elif col["name"] == "id2":
if "autoincrement" in col:
is_true(col["autoincrement"])
eq_(col["default"], None)
is_true("identity" in col)
self.check(
col["identity"],
dict(
always=True,
start=2,
increment=3,
minvalue=-2,
maxvalue=42,
cycle=True,
cache=4,
),
approx=False,
)
@testing.requires.schemas
def test_reflect_identity_schema(self):
insp = inspect(config.db)
cols = insp.get_columns("t1", schema=config.test_schema)
for col in cols:
if col["name"] == "normal":
is_false("identity" in col)
elif col["name"] == "id1":
if "autoincrement" in col:
is_true(col["autoincrement"])
eq_(col["default"], None)
is_true("identity" in col)
self.check(
col["identity"],
dict(
always=True,
start=20,
increment=1,
minvalue=1,
maxvalue=2147483647,
cycle=False,
cache=1,
),
approx=True,
)
|
IdentityReflectionTest
|
python
|
huggingface__transformers
|
src/transformers/models/luke/configuration_luke.py
|
{
"start": 766,
"end": 6628
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LukeModel`]. It is used to instantiate a LUKE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LUKE
[studio-ousia/luke-base](https://huggingface.co/studio-ousia/luke-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50267):
Vocabulary size of the LUKE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LukeModel`].
entity_vocab_size (`int`, *optional*, defaults to 500000):
Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented
by the `entity_ids` passed when calling [`LukeModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
entity_emb_size (`int`, *optional*, defaults to 256):
The number of dimensions of the entity embedding.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LukeModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_entity_aware_attention (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the entity-aware self-attention mechanism proposed in [LUKE: Deep
Contextualized Entity Representations with Entity-aware Self-attention (Yamada et
al.)](https://huggingface.co/papers/2010.01057).
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
Examples:
```python
>>> from transformers import LukeConfig, LukeModel
>>> # Initializing a LUKE configuration
>>> configuration = LukeConfig()
>>> # Initializing a model from the configuration
>>> model = LukeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "luke"
def __init__(
self,
vocab_size=50267,
entity_vocab_size=500000,
hidden_size=768,
entity_emb_size=256,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
use_entity_aware_attention=True,
classifier_dropout=None,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs,
):
"""Constructs LukeConfig."""
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.entity_vocab_size = entity_vocab_size
self.hidden_size = hidden_size
self.entity_emb_size = entity_emb_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_entity_aware_attention = use_entity_aware_attention
self.classifier_dropout = classifier_dropout
__all__ = ["LukeConfig"]
|
LukeConfig
|
python
|
jamielennox__requests-mock
|
requests_mock/exceptions.py
|
{
"start": 548,
"end": 619
}
|
class ____(Exception):
"""Base Exception for library"""
|
MockException
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/other/test_statsd.py
|
{
"start": 926,
"end": 14198
}
|
class ____:
"""Tests statsd."""
def test_should_create_statsd_default(self):
docs = render_chart(show_only=["templates/statsd/statsd-deployment.yaml"])
assert jmespath.search("metadata.name", docs[0]) == "release-name-statsd"
assert jmespath.search("spec.template.spec.containers[0].name", docs[0]) == "statsd"
assert {"name": "config", "configMap": {"name": "release-name-statsd"}} in jmespath.search(
"spec.template.spec.volumes", docs[0]
)
assert {
"name": "config",
"mountPath": "/etc/statsd-exporter",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
default_args = ["--statsd.mapping-config=/etc/statsd-exporter/mappings.yml"]
assert default_args == jmespath.search("spec.template.spec.containers[0].args", docs[0])
def test_should_add_volume_and_volume_mount_when_exist_extra_mappings(self):
extra_mapping = {
"match": "airflow.pool.queued_slots.*",
"name": "airflow_pool_queued_slots",
"labels": {"pool": "$1"},
}
docs = render_chart(
values={"statsd": {"enabled": True, "extraMappings": [extra_mapping]}},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert {"name": "config", "configMap": {"name": "release-name-statsd"}} in jmespath.search(
"spec.template.spec.volumes", docs[0]
)
assert {
"name": "config",
"mountPath": "/etc/statsd-exporter",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
def test_should_add_volume_and_volume_mount_when_exist_override_mappings(self):
override_mapping = {
"match": "airflow.pool.queued_slots.*",
"name": "airflow_pool_queued_slots",
"labels": {"pool": "$1"},
}
docs = render_chart(
values={"statsd": {"enabled": True, "overrideMappings": [override_mapping]}},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert {"name": "config", "configMap": {"name": "release-name-statsd"}} in jmespath.search(
"spec.template.spec.volumes", docs[0]
)
assert {
"name": "config",
"mountPath": "/etc/statsd-exporter",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
@pytest.mark.parametrize(
("revision_history_limit", "global_revision_history_limit"),
[(8, 10), (10, 8), (8, None), (None, 10), (None, None)],
)
def test_revision_history_limit(self, revision_history_limit, global_revision_history_limit):
values = {"statsd": {"enabled": True}}
if revision_history_limit:
values["statsd"]["revisionHistoryLimit"] = revision_history_limit
if global_revision_history_limit:
values["revisionHistoryLimit"] = global_revision_history_limit
docs = render_chart(
values=values,
show_only=["templates/statsd/statsd-deployment.yaml"],
)
expected_result = revision_history_limit or global_revision_history_limit
assert jmespath.search("spec.revisionHistoryLimit", docs[0]) == expected_result
def test_scheduler_name(self):
docs = render_chart(
values={"schedulerName": "airflow-scheduler"},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert (
jmespath.search(
"spec.template.spec.schedulerName",
docs[0],
)
== "airflow-scheduler"
)
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"statsd": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("kind", docs[0]) == "Deployment"
assert (
jmespath.search(
"spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
== "foo"
)
assert (
jmespath.search(
"spec.template.spec.nodeSelector.diskType",
docs[0],
)
== "ssd"
)
assert (
jmespath.search(
"spec.template.spec.tolerations[0].key",
docs[0],
)
== "dynamic-pods"
)
def test_stastd_resources_are_configurable(self):
docs = render_chart(
values={
"statsd": {
"resources": {
"limits": {"cpu": "200m", "memory": "128Mi"},
"requests": {"cpu": "300m", "memory": "169Mi"},
}
},
},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources.limits.memory", docs[0]) == "128Mi"
assert (
jmespath.search("spec.template.spec.containers[0].resources.requests.memory", docs[0]) == "169Mi"
)
assert jmespath.search("spec.template.spec.containers[0].resources.requests.cpu", docs[0]) == "300m"
def test_statsd_security_contexts_are_configurable(self):
docs = render_chart(
values={
"statsd": {
"securityContexts": {
"pod": {
"fsGroup": 1000,
"runAsGroup": 1001,
"runAsNonRoot": True,
"runAsUser": 2000,
},
"container": {
"allowPrivilegeEscalation": False,
"readOnlyRootFilesystem": True,
},
}
},
},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].securityContext", docs[0]) == {
"allowPrivilegeEscalation": False,
"readOnlyRootFilesystem": True,
}
assert jmespath.search("spec.template.spec.securityContext", docs[0]) == {
"runAsUser": 2000,
"runAsGroup": 1001,
"fsGroup": 1000,
"runAsNonRoot": True,
}
def test_statsd_security_context_legacy(self):
docs = render_chart(
values={
"statsd": {
"securityContext": {
"fsGroup": 1000,
"runAsGroup": 1001,
"runAsNonRoot": True,
"runAsUser": 2000,
}
},
},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.securityContext", docs[0]) == {
"runAsUser": 2000,
"runAsGroup": 1001,
"fsGroup": 1000,
"runAsNonRoot": True,
}
def test_statsd_resources_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {}
def test_statsd_configmap_by_default(self):
docs = render_chart(show_only=["templates/configmaps/statsd-configmap.yaml"])
mappings_yml = jmespath.search('data."mappings.yml"', docs[0])
mappings_yml_obj = yaml.safe_load(mappings_yml)
names = [mapping["name"] for mapping in mappings_yml_obj["mappings"]]
assert "airflow_dagrun_dependency_check" in names
assert "airflow_pool_starving_tasks" in names
def test_statsd_configmap_when_exist_extra_mappings(self):
extra_mapping = {
"match": "airflow.pool.queued_slots.*",
"name": "airflow_pool_queued_slots",
"labels": {"pool": "$1"},
}
docs = render_chart(
values={"statsd": {"enabled": True, "extraMappings": [extra_mapping]}},
show_only=["templates/configmaps/statsd-configmap.yaml"],
)
mappings_yml = jmespath.search('data."mappings.yml"', docs[0])
mappings_yml_obj = yaml.safe_load(mappings_yml)
assert mappings_yml_obj["mappings"][0]["name"] == "airflow_dagrun_dependency_check"
assert mappings_yml_obj["mappings"][-1]["name"] == "airflow_pool_queued_slots"
def test_statsd_configmap_when_exist_override_mappings(self):
override_mapping = {
"match": "airflow.pool.queued_slots.*",
"name": "airflow_pool_queued_slots",
"labels": {"pool": "$1"},
}
docs = render_chart(
values={"statsd": {"enabled": True, "overrideMappings": [override_mapping]}},
show_only=["templates/configmaps/statsd-configmap.yaml"],
)
mappings_yml = jmespath.search('data."mappings.yml"', docs[0])
mappings_yml_obj = yaml.safe_load(mappings_yml)
assert len(mappings_yml_obj["mappings"]) == 1
assert mappings_yml_obj["mappings"][0]["name"] == "airflow_pool_queued_slots"
def test_statsd_args_can_be_overridden(self):
args = ["--some-arg=foo"]
docs = render_chart(
values={"statsd": {"enabled": True, "args": args}},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].args", docs[0]) == args
def test_should_add_component_specific_annotations(self):
docs = render_chart(
values={
"statsd": {
"annotations": {"test_annotation": "test_annotation_value"},
"podAnnotations": {"test_pod_annotation": "test_pod_annotation_value"},
},
},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert "annotations" in jmespath.search("metadata", docs[0])
assert jmespath.search("metadata.annotations", docs[0])["test_annotation"] == "test_annotation_value"
assert "test_pod_annotation" in jmespath.search("spec.template.metadata.annotations", docs[0])
assert (
jmespath.search("spec.template.metadata.annotations", docs[0])["test_pod_annotation"]
== "test_pod_annotation_value"
)
def test_should_add_custom_env_variables(self):
env1 = {"name": "TEST_ENV_1", "value": "test_env_1"}
docs = render_chart(
values={
"statsd": {
"enabled": True,
"env": [env1],
},
},
show_only=["templates/statsd/statsd-deployment.yaml"],
)[0]
assert jmespath.search("spec.template.spec.containers[0].env", docs) == [env1]
def test_should_add_annotations_to_statsd_configmap(self):
docs = render_chart(
values={
"statsd": {
"enabled": True,
"configMapAnnotations": {"test_annotation": "test_annotation_value"},
},
},
show_only=["templates/configmaps/statsd-configmap.yaml"],
)[0]
assert "annotations" in jmespath.search("metadata", docs)
assert jmespath.search("metadata.annotations", docs)["test_annotation"] == "test_annotation_value"
@pytest.mark.parametrize(
("statsd_values", "expected"),
[
({}, 30),
({"statsd": {"terminationGracePeriodSeconds": 1200}}, 1200),
],
)
def test_statsd_termination_grace_period_seconds(self, statsd_values, expected):
docs = render_chart(
values=statsd_values,
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert expected == jmespath.search("spec.template.spec.terminationGracePeriodSeconds", docs[0])
|
TestStatsd
|
python
|
numpy__numpy
|
numpy/_core/tests/test_multiarray.py
|
{
"start": 16649,
"end": 21614
}
|
class ____:
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2], dtype=object)
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_0d_array_shape(self):
assert np.ones(np.array(3)).shape == (3,)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
np.array(d, copy=False, order='F')
def test_array_copy_if_needed(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=None)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=None, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1, 2, 3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1, 2, 3]])
def test_array_copy_str(self):
with pytest.raises(
ValueError,
match="strings are not allowed for 'copy' keyword. "
"Use True/False/None instead."
):
np.array([1, 2, 3], copy="always")
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2, ::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_bad_arguments_error(self, func):
with pytest.raises(TypeError):
func(3, dtype="bad dtype")
with pytest.raises(TypeError):
func() # missing arguments
with pytest.raises(TypeError):
func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_array_as_keyword(self, func):
# This should likely be made positional only, but do not change
# the name accidentally.
if func is np.array:
func(object=3)
else:
func(a=3)
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_array_signature(self, func):
sig = inspect.signature(func)
assert len(sig.parameters) >= 3
arg0 = "object" if func is np.array else "a"
assert arg0 in sig.parameters
assert sig.parameters[arg0].default is inspect.Parameter.empty
assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert "dtype" in sig.parameters
assert sig.parameters["dtype"].default is None
assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert "like" in sig.parameters
assert sig.parameters["like"].default is None
assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY
|
TestArrayConstruction
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-substring-with-maximum-cost.py
|
{
"start": 77,
"end": 626
}
|
class ____(object):
def maximumCostSubstring(self, s, chars, vals):
"""
:type s: str
:type chars: str
:type vals: List[int]
:rtype: int
"""
def kadane(s):
result = curr = 0
for c in s:
curr = max(curr+(lookup[c] if c in lookup else ord(c)-ord('a')+1), 0)
result = max(result, curr)
return result
lookup = {}
for c, v in itertools.izip(chars, vals):
lookup[c] = v
return kadane(s)
|
Solution
|
python
|
huggingface__transformers
|
tests/models/idefics3/test_image_processing_idefics3.py
|
{
"start": 1243,
"end": 6473
}
|
class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
num_images=1,
image_size=18,
min_resolution=30,
max_resolution=40,
do_resize=True,
size=None,
max_image_size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_convert_rgb=True,
do_pad=True,
do_image_splitting=True,
resample=PILImageResampling.LANCZOS,
):
self.size = size if size is not None else {"longest_edge": max_resolution}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_images = num_images
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.resample = resample
self.do_image_splitting = do_image_splitting
self.max_image_size = max_image_size if max_image_size is not None else {"longest_edge": 20}
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
self.do_pad = do_pad
def prepare_image_processor_dict(self):
return {
"do_convert_rgb": self.do_convert_rgb,
"do_resize": self.do_resize,
"size": self.size,
"max_image_size": self.max_image_size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
"do_image_splitting": self.do_image_splitting,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to Idefics3ImageProcessor,
assuming do_resize is set to True. The expected size in that case the max image size.
"""
return self.max_image_size["longest_edge"], self.max_image_size["longest_edge"]
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
effective_nb_images = (
self.num_images * 5 if self.do_image_splitting else 1
) # 5 is a squared image divided into 4 + global image resized
return effective_nb_images, self.num_channels, height, width
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
if numpify:
# Numpy images are typically in channels last format
images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
return images_list
@require_torch
@require_vision
|
Idefics3ImageProcessingTester
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/many_to_many/tutorial003_py310.py
|
{
"start": 681,
"end": 3756
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_links: list[HeroTeamLink] = Relationship(back_populates="hero")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond",
secret_name="Dive Wilson",
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
)
hero_spider_boy = Hero(
name="Spider-Boy",
secret_name="Pedro Parqueador",
)
deadpond_team_z_link = HeroTeamLink(team=team_z_force, hero=hero_deadpond)
deadpond_preventers_link = HeroTeamLink(
team=team_preventers, hero=hero_deadpond, is_training=True
)
spider_boy_preventers_link = HeroTeamLink(
team=team_preventers, hero=hero_spider_boy, is_training=True
)
rusty_man_preventers_link = HeroTeamLink(
team=team_preventers, hero=hero_rusty_man
)
session.add(deadpond_team_z_link)
session.add(deadpond_preventers_link)
session.add(spider_boy_preventers_link)
session.add(rusty_man_preventers_link)
session.commit()
for link in team_z_force.hero_links:
print("Z-Force hero:", link.hero, "is training:", link.is_training)
for link in team_preventers.hero_links:
print("Preventers hero:", link.hero, "is training:", link.is_training)
def update_heroes():
with Session(engine) as session:
hero_spider_boy = session.exec(
select(Hero).where(Hero.name == "Spider-Boy")
).one()
team_z_force = session.exec(select(Team).where(Team.name == "Z-Force")).one()
spider_boy_z_force_link = HeroTeamLink(
team=team_z_force, hero=hero_spider_boy, is_training=True
)
team_z_force.hero_links.append(spider_boy_z_force_link)
session.add(team_z_force)
session.commit()
print("Updated Spider-Boy's Teams:", hero_spider_boy.team_links)
print("Z-Force heroes:", team_z_force.hero_links)
for link in hero_spider_boy.team_links:
if link.team.name == "Preventers":
link.is_training = False
session.add(hero_spider_boy)
session.commit()
for link in hero_spider_boy.team_links:
print("Spider-Boy team:", link.team, "is training:", link.is_training)
def main():
create_db_and_tables()
create_heroes()
update_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/activity.py
|
{
"start": 1056,
"end": 8309
}
|
class ____(object):
"""Encloses local symbol definition and usage information.
This can track for instance whether a symbol is modified in the current scope.
Note that scopes do not necessarily align with Python's scopes. For example,
the body of an if statement may be considered a separate scope.
Caution - the AST references held by this object are weak.
Scope objects are mutable during construction only, and must be frozen using
`Scope.finalize()` before use. Furthermore, a scope is consistent only after
all its children have been frozen. While analysing code blocks, scopes are
being gradually built, from the innermost scope outward. Freezing indicates
that the analysis of a code block is complete. Once frozen, mutation is no
longer allowed. `is_final` tracks whether the scope is frozen or not. Certain
properties, like `referenced`, are only accurate when called on frozen scopes.
Attributes:
parent: Optional[Scope], the parent scope, if any.
isolated: bool, whether the scope is a true Python scope (e.g. the scope of
a function), or just a surrogate tracking an ordinary code block. Using
the terminology of the Python 3 reference documentation, True roughly
represents an actual scope, whereas False represents an ordinary code
block.
function_name: Optional[str], name of the function owning this scope.
isolated_names: Set[qual_names.QN], identifiers that are isolated to this
scope (even if the scope is not isolated).
annotations: Set[qual_names.QN], identifiers used as type annotations
in this scope.
read: Set[qual_names.QN], identifiers read in this scope.
modified: Set[qual_names.QN], identifiers modified in this scope.
deleted: Set[qual_names.QN], identifiers deleted in this scope.
bound: Set[qual_names.QN], names that are bound to this scope. See
https://docs.python.org/3/reference/executionmodel.html#binding-of-names
for a precise definition.
globals: Set[qual_names.QN], names that are explicitly marked as global in
this scope. Note that this doesn't include free read-only vars bound to
global symbols.
nonlocals: Set[qual_names.QN], names that are explicitly marked as nonlocal
in this scope. Note that this doesn't include free read-only vars bound to
global symbols.
free_vars: Set[qual_names.QN], the free variables in this scope. See
https://docs.python.org/3/reference/executionmodel.html for a precise
definition.
params: WeakValueDictionary[qual_names.QN, ast.Node], function arguments
visible in this scope, mapped to the function node that defines them.
enclosing_scope: Scope, the innermost isolated scope that is a transitive
parent of this scope. May be the scope itself.
referenced: Set[qual_names.QN], the totality of the symbols used by this
scope and its parents.
is_final: bool, whether the scope is frozen or not.
Note - simple statements may never delete and modify a symbol at the same
time. However, compound ones like if statements can. In that latter case, it's
undefined whether the symbol is actually modified or deleted upon statement
exit. Certain analyses like reaching definitions need to be careful about
this.
"""
# Note: this mutable-immutable pattern is used because using a builder would
# have taken a lot more boilerplate.
def __init__(self, parent, isolated=True, function_name=None):
"""Create a new scope.
Args:
parent: A Scope or None.
isolated: Whether the scope is isolated, that is, whether variables
modified in this scope should be considered modified in the parent
scope.
function_name: Name of the function owning this scope.
"""
self.parent = parent
self.isolated = isolated
self.function_name = function_name
self.isolated_names = set()
self.read = set()
self.modified = set()
self.deleted = set()
self.bound = set()
self.globals = set()
self.nonlocals = set()
self.annotations = set()
self.params = weakref.WeakValueDictionary()
# Certain fields can only be accessed after the scope and all its parent
# scopes have been fully built. This field guards that.
self.is_final = False
@property
def enclosing_scope(self):
assert self.is_final
if self.parent is not None and not self.isolated:
return self.parent
return self
@property
def referenced(self):
if self.parent is not None:
return self.read | self.parent.referenced
return self.read
@property
def free_vars(self):
enclosing_scope = self.enclosing_scope
return enclosing_scope.read - enclosing_scope.bound
def copy_from(self, other):
"""Recursively copies the contents of this scope from another scope."""
assert not self.is_final
if self.parent is not None:
assert other.parent is not None
self.parent.copy_from(other.parent)
self.isolated_names = copy.copy(other.isolated_names)
self.modified = copy.copy(other.modified)
self.read = copy.copy(other.read)
self.deleted = copy.copy(other.deleted)
self.bound = copy.copy(other.bound)
self.annotations = copy.copy(other.annotations)
self.params = copy.copy(other.params)
@classmethod
def copy_of(cls, other):
if other.parent is not None:
assert other.parent is not None
parent = cls.copy_of(other.parent)
else:
parent = None
new_copy = cls(parent)
new_copy.copy_from(other)
return new_copy
def merge_from(self, other):
"""Adds all activity from another scope to this scope."""
assert not self.is_final
if self.parent is not None:
assert other.parent is not None
self.parent.merge_from(other.parent)
self.isolated_names.update(other.isolated_names)
self.read.update(other.read)
self.modified.update(other.modified)
self.bound.update(other.bound)
self.deleted.update(other.deleted)
self.annotations.update(other.annotations)
self.params.update(other.params)
def finalize(self):
"""Freezes this scope."""
assert not self.is_final
# TODO(mdan): freeze read, modified, bound.
if self.parent is not None:
assert not self.parent.is_final
if not self.isolated:
self.parent.read.update(self.read - self.isolated_names)
self.parent.modified.update(self.modified - self.isolated_names)
self.parent.bound.update(self.bound - self.isolated_names)
self.parent.globals.update(self.globals)
self.parent.nonlocals.update(self.nonlocals)
self.parent.annotations.update(self.annotations)
else:
# TODO(mdan): This is not accurate.
self.parent.read.update(self.read - self.bound)
self.parent.annotations.update(self.annotations - self.bound)
self.is_final = True
def __repr__(self):
return 'Scope{r=%s, w=%s}' % (tuple(self.read), tuple(self.modified))
def mark_param(self, name, owner):
# Assumption: all AST nodes have the same life span. This lets us use
# a weak reference to mark the connection between a symbol node and the
# function node whose argument that symbol is.
self.params[name] = owner
|
Scope
|
python
|
PyCQA__isort
|
tests/unit/test_exceptions.py
|
{
"start": 46,
"end": 359
}
|
class ____:
def setup_class(self):
self.instance = exceptions.ISortError()
def test_init(self):
assert isinstance(self.instance, exceptions.ISortError)
def test_pickleable(self):
assert isinstance(pickle.loads(pickle.dumps(self.instance)), exceptions.ISortError)
|
TestISortError
|
python
|
pytorch__pytorch
|
test/inductor/test_efficient_conv_bn_eval.py
|
{
"start": 1289,
"end": 2656
}
|
class ____(nn.Module):
expected_optimization_count = 3
def __init__(
self,
conv_class,
bn_class,
use_bias,
in_channels,
out_channels,
device,
**kwargs,
):
super().__init__()
self.conv1 = conv_class(in_channels, out_channels, bias=use_bias, **kwargs).to(
device
)
self.bn1 = bn_class(out_channels).to(device)
self.conv2 = conv_class(out_channels, out_channels, bias=use_bias, **kwargs).to(
device
)
self.bn2 = bn_class(out_channels).to(device)
self.conv3 = conv_class(out_channels, out_channels, bias=use_bias, **kwargs).to(
device
)
self.bn3 = bn_class(out_channels).to(device)
def forward(self, x):
# this conv-bn pair can use efficient_conv_bn_eval
x = self.bn1(self.conv1(input=x))
# this conv-bn pair cannot use efficient_conv_bn_eval feature
# just for the second forward of the `self.conv2`
x = self.bn2(input=self.conv2(self.conv2(x)))
# this conv-bn pair can use efficient_conv_bn_eval feature
# just for the first forward of the `self.bn3`
# test for multiple users of one computation node
x = self.bn3(input=self.conv3(input=x))
x = self.bn3(x) + x
return x
|
MultiUserConvOp
|
python
|
apache__thrift
|
lib/py/src/Thrift.py
|
{
"start": 1984,
"end": 2208
}
|
class ____(Exception):
"""Base class for all thrift exceptions."""
def __init__(self, message=None):
Exception.__init__(self, message)
super(TException, self).__setattr__("message", message)
|
TException
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/nested_structure_coder_test.py
|
{
"start": 21915,
"end": 22218
}
|
class ____(type_spec.TypeSpec):
value_type = property(lambda self: None)
_component_specs = property(lambda self: ())
_to_components = lambda self, v: ()
_from_components = classmethod(lambda cls, c: cls())
_serialize = lambda self: ()
if __name__ == "__main__":
test.main()
|
RegisteredTypeSpec
|
python
|
coleifer__peewee
|
tests/postgres.py
|
{
"start": 26830,
"end": 26927
}
|
class ____(TestModel):
id = IdentityField(generate_always=True)
data = CharField()
|
IDAlways
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/operators/hash_aggregate.py
|
{
"start": 691,
"end": 3769
}
|
class ____(StatefulShuffleAggregation):
"""Aggregation performing reduction of the shuffled sequence using provided
list of aggregating functions.
NOTE: That reductions are performed incrementally in a streaming fashion upon
accumulation of pre-configured buffer of rows to run aggregation on."""
_DEFAULT_BLOCKS_BUFFER_LIMIT = 1000
def __init__(
self,
aggregator_id: int,
key_columns: Optional[Tuple[str]],
aggregation_fns: Tuple[AggregateFn],
):
super().__init__(aggregator_id)
assert key_columns is not None, "Shuffle aggregation requires key columns"
self._sort_key: "SortKey" = ReducingShuffleAggregation._get_sort_key(
key_columns
)
self._aggregation_fns: Tuple[AggregateFn] = aggregation_fns
self._aggregated_blocks: List[Block] = []
def accept(self, input_seq_id: int, partition_id: int, partition_shard: Block):
assert (
input_seq_id == 0
), f"Single sequence is expected (got seq-id {input_seq_id})"
# Received partition shard is already partially aggregated, hence
# we simply add it to the list of aggregated blocks
#
# NOTE: We're not separating blocks by partition as it's ultimately not
# relevant for the aggregations performed
self._aggregated_blocks.append(partition_shard)
# Aggregation is performed incrementally, rather
# than being deferred to the finalization stage
if len(self._aggregated_blocks) > self._DEFAULT_BLOCKS_BUFFER_LIMIT:
# NOTE: This method will reset partially aggregated blocks to hold
# the new combined one
#
# TODO make aggregation async
self._combine_aggregated_blocks(should_finalize=False)
def finalize(self, partition_id: int) -> Block:
if len(self._aggregated_blocks) == 0:
return ArrowBlockAccessor._empty_table()
return self._combine_aggregated_blocks(should_finalize=True)
def clear(self, partition_id: int):
self._aggregated_blocks: List[Block] = []
def _combine_aggregated_blocks(self, *, should_finalize: bool) -> Block:
assert len(self._aggregated_blocks) > 0
block_accessor = BlockAccessor.for_block(self._aggregated_blocks[0])
combined_block, _ = block_accessor._combine_aggregated_blocks(
self._aggregated_blocks,
sort_key=self._sort_key,
aggs=self._aggregation_fns,
finalize=should_finalize,
)
# For combined block that's not yet finalized reset cached aggregated
# blocks to only hold newly combined one
if not should_finalize:
self._aggregated_blocks = [combined_block]
return combined_block
@staticmethod
def _get_sort_key(key_columns: Tuple[str]):
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
return SortKey(key=list(key_columns), descending=False)
|
ReducingShuffleAggregation
|
python
|
apache__airflow
|
providers/qdrant/tests/integration/qdrant/operators/test_qdrant_ingest.py
|
{
"start": 1173,
"end": 2474
}
|
class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_qdrant_dag_id", default_args=args)
self.mock_context = MagicMock()
self.channel = "test"
def test_execute_hello(self):
collection_name = "test-operator-collection"
dimensions = 384
points_count = 100
vectors = [[random.uniform(0, 1) for _ in range(dimensions)] for _ in range(points_count)]
ids = random.sample(range(100, 10000), points_count)
payload = [{"some_number": i % 10} for i in range(points_count)]
operator = QdrantIngestOperator(
task_id="qdrant_ingest",
conn_id="qdrant_default",
collection_name=collection_name,
vectors=vectors,
ids=ids,
payload=payload,
batch_size=1,
)
hook = operator.hook
hook.conn.create_collection(
collection_name, vectors_config=VectorParams(size=dimensions, distance=Distance.COSINE)
)
operator.execute(self.mock_context)
assert hook.conn.count(collection_name=collection_name).count == points_count, (
f"Added {points_count} points to the Qdrant collection"
)
|
TestQdrantIngestOperator
|
python
|
gevent__gevent
|
src/greentest/3.9/test_ssl.py
|
{
"start": 96877,
"end": 110311
}
|
class ____(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
# bpo-44229, bpo-43855, bpo-44237, and bpo-33450:
# Ignore spurious EPROTOTYPE returned by write() on macOS.
# See also http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE and sys.platform != "darwin":
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except (ConnectionResetError, ConnectionAbortedError):
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except ssl.SSLError as err:
# On Windows sometimes test_pha_required_nocert receives the
# PEER_DID_NOT_RETURN_A_CERTIFICATE exception
# before the 'tlsv13 alert certificate required' exception.
# If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
# is received test_pha_required_nocert fails with ConnectionResetError
# because the underlying socket is closed
if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason:
if self.server.chatty and support.verbose:
sys.stdout.write(err.args[1])
# test_pha_required_nocert is expecting this exception
raise ssl.SSLError('tlsv13 alert certificate required')
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
|
ThreadedEchoServer
|
python
|
pytorch__pytorch
|
torch/_inductor/pattern_matcher.py
|
{
"start": 27690,
"end": 27848
}
|
class ____(_TargetArgsExpr):
"""
Matches a call_function node in the FX graphs: `fns[i](*args, **kwargs)`
"""
op = "call_function"
|
CallFunction
|
python
|
getsentry__sentry
|
tests/sentry/api/test_base.py
|
{
"start": 3827,
"end": 14017
}
|
class ____(APITestCase):
def test_basic_cors(self) -> None:
org = self.create_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
apikey = ApiKey.objects.create(organization_id=org.id, allowed_origins="*")
request = self.make_request(method="GET")
request.META["HTTP_ORIGIN"] = "http://example.com"
request.META["HTTP_AUTHORIZATION"] = self.create_basic_auth_header(apikey.key)
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 200, response.content
assert response["Access-Control-Allow-Origin"] == "http://example.com"
assert response["Access-Control-Allow-Headers"] == (
"X-Sentry-Auth, X-Requested-With, Origin, Accept, "
"Content-Type, Authentication, Authorization, Content-Encoding, "
"sentry-trace, baggage, X-CSRFToken"
)
assert response["Access-Control-Expose-Headers"] == (
"X-Sentry-Error, X-Sentry-Direct-Hit, X-Hits, X-Max-Hits, "
"Endpoint, Retry-After, Link"
)
assert response["Access-Control-Allow-Methods"] == "GET, HEAD, OPTIONS"
assert "Access-Control-Allow-Credentials" not in response
@override_options({"system.base-hostname": "example.com"})
def test_allow_credentials_subdomain(self) -> None:
org = self.create_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
apikey = ApiKey.objects.create(organization_id=org.id, allowed_origins="*")
request = self.make_request(method="GET")
# Origin is a subdomain of base-hostname, and is cors allowed
request.META["HTTP_ORIGIN"] = "http://acme.example.com"
request.META["HTTP_AUTHORIZATION"] = self.create_basic_auth_header(apikey.key)
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 200, response.content
assert response["Access-Control-Allow-Origin"] == "http://acme.example.com"
assert response["Access-Control-Allow-Headers"] == (
"X-Sentry-Auth, X-Requested-With, Origin, Accept, "
"Content-Type, Authentication, Authorization, Content-Encoding, "
"sentry-trace, baggage, X-CSRFToken"
)
assert response["Access-Control-Expose-Headers"] == (
"X-Sentry-Error, X-Sentry-Direct-Hit, X-Hits, X-Max-Hits, "
"Endpoint, Retry-After, Link"
)
assert response["Access-Control-Allow-Methods"] == "GET, HEAD, OPTIONS"
assert response["Access-Control-Allow-Credentials"] == "true"
@override_options({"system.base-hostname": "example.com"})
def test_allow_credentials_root_domain(self) -> None:
org = self.create_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
apikey = ApiKey.objects.create(organization_id=org.id, allowed_origins="*")
request = self.make_request(method="GET")
# Origin is base-hostname, and is cors allowed
request.META["HTTP_ORIGIN"] = "http://example.com"
request.META["HTTP_AUTHORIZATION"] = self.create_basic_auth_header(apikey.key)
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 200, response.content
assert response["Access-Control-Allow-Origin"] == "http://example.com"
assert response["Access-Control-Allow-Headers"] == (
"X-Sentry-Auth, X-Requested-With, Origin, Accept, "
"Content-Type, Authentication, Authorization, Content-Encoding, "
"sentry-trace, baggage, X-CSRFToken"
)
assert response["Access-Control-Expose-Headers"] == (
"X-Sentry-Error, X-Sentry-Direct-Hit, X-Hits, X-Max-Hits, "
"Endpoint, Retry-After, Link"
)
assert response["Access-Control-Allow-Methods"] == "GET, HEAD, OPTIONS"
assert response["Access-Control-Allow-Credentials"] == "true"
@override_options({"system.base-hostname": "example.com"})
@override_settings(ALLOWED_CREDENTIAL_ORIGINS=["http://docs.example.org"])
def test_allow_credentials_allowed_domain(self) -> None:
org = self.create_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
apikey = ApiKey.objects.create(organization_id=org.id, allowed_origins="*")
request = self.make_request(method="GET")
# Origin is an allowed domain
request.META["HTTP_ORIGIN"] = "http://docs.example.org"
request.META["HTTP_AUTHORIZATION"] = self.create_basic_auth_header(apikey.key)
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 200, response.content
assert response["Access-Control-Allow-Origin"] == "http://docs.example.org"
assert response["Access-Control-Allow-Headers"] == (
"X-Sentry-Auth, X-Requested-With, Origin, Accept, "
"Content-Type, Authentication, Authorization, Content-Encoding, "
"sentry-trace, baggage, X-CSRFToken"
)
assert response["Access-Control-Expose-Headers"] == (
"X-Sentry-Error, X-Sentry-Direct-Hit, X-Hits, X-Max-Hits, "
"Endpoint, Retry-After, Link"
)
assert response["Access-Control-Allow-Methods"] == "GET, HEAD, OPTIONS"
assert response["Access-Control-Allow-Credentials"] == "true"
@override_options({"system.base-hostname": "acme.com"})
def test_allow_credentials_incorrect(self) -> None:
org = self.create_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
apikey = ApiKey.objects.create(organization_id=org.id, allowed_origins="*")
for http_origin in ["http://acme.example.com", "http://fakeacme.com"]:
request = self.make_request(method="GET")
request.META["HTTP_ORIGIN"] = http_origin
request.META["HTTP_AUTHORIZATION"] = self.create_basic_auth_header(apikey.key)
response = _dummy_endpoint(request)
response.render()
assert "Access-Control-Allow-Credentials" not in response
@override_options({"system.base-hostname": "acme.com"})
def test_disallow_credentials_when_two_origins(self) -> None:
org = self.create_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
apikey = ApiKey.objects.create(organization_id=org.id, allowed_origins="*")
request = self.make_request(method="GET")
request.META["HTTP_ORIGIN"] = "http://evil.com, http://acme.com"
request.META["HTTP_AUTHORIZATION"] = self.create_basic_auth_header(apikey.key)
response = _dummy_endpoint(request)
response.render()
assert "Access-Control-Allow-Credentials" not in response
def test_invalid_cors_without_auth(self) -> None:
request = self.make_request(method="GET")
request.META["HTTP_ORIGIN"] = "http://example.com"
with self.settings(SENTRY_ALLOW_ORIGIN="https://sentry.io"):
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 400, response.content
def test_valid_cors_without_auth(self) -> None:
request = self.make_request(method="GET")
request.META["HTTP_ORIGIN"] = "http://example.com"
with self.settings(SENTRY_ALLOW_ORIGIN="*"):
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 200, response.content
assert response["Access-Control-Allow-Origin"] == "http://example.com"
# XXX(dcramer): The default setting needs to allow requests to work or it will be a regression
def test_cors_not_configured_is_valid(self) -> None:
request = self.make_request(method="GET")
request.META["HTTP_ORIGIN"] = "http://example.com"
with self.settings(SENTRY_ALLOW_ORIGIN=None):
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 200, response.content
assert response["Access-Control-Allow-Origin"] == "http://example.com"
assert response["Access-Control-Allow-Headers"] == (
"X-Sentry-Auth, X-Requested-With, Origin, Accept, "
"Content-Type, Authentication, Authorization, Content-Encoding, "
"sentry-trace, baggage, X-CSRFToken"
)
assert response["Access-Control-Expose-Headers"] == (
"X-Sentry-Error, X-Sentry-Direct-Hit, X-Hits, X-Max-Hits, "
"Endpoint, Retry-After, Link"
)
assert response["Access-Control-Allow-Methods"] == "GET, HEAD, OPTIONS"
def test_update_token_access_record_is_called(self) -> None:
token_str = generate_token(self.organization.slug, "")
token_hashed = hash_token(token_str)
token = self.create_org_auth_token(
name="org-auth-token",
token_hashed=token_hashed,
organization_id=self.organization.id,
token_last_characters="xyz",
scope_list=["org:ci"],
date_last_used=None,
)
assert token.date_last_used is None
with outbox_runner():
request = self.make_request(method="GET")
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token_str}"
_dummy_endpoint(request=request)
with self.tasks(), assume_test_silo_mode(SiloMode.REGION):
schedule_hybrid_cloud_foreign_key_jobs()
token.refresh_from_db()
assert isinstance(token.date_last_used, datetime)
@mock.patch("sentry.api.base.Endpoint.convert_args")
def test_method_not_allowed(self, mock_convert_args: MagicMock) -> None:
request = self.make_request(method="POST")
# Run this particular test in monolith mode to prevent RPC interactions
with assume_test_silo_mode(SiloMode.MONOLITH):
response = _dummy_endpoint(request)
response.render()
assert response.status_code == 405, response.content
# did not try to convert args
assert not mock_convert_args.info.called
|
EndpointTest
|
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 9552,
"end": 10056
}
|
class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
num_categorical = float(
metafeatures["NumberOfCategoricalFeatures"](X, y, logger, feat_type).value
)
num_numerical = float(
metafeatures["NumberOfNumericFeatures"](X, y, logger, feat_type).value
)
if num_categorical == 0.0:
return 0.0
return num_numerical / num_categorical
@metafeatures.define("RatioNominalToNumerical")
|
RatioNumericalToNominal
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/session.py
|
{
"start": 2052,
"end": 2651
}
|
class ____(BaseModel):
group_id: Optional[str] = None
"""
The group id to attach to this trace to enable filtering and grouping in the
traces dashboard.
"""
metadata: Optional[object] = None
"""
The arbitrary metadata to attach to this trace to enable filtering in the traces
dashboard.
"""
workflow_name: Optional[str] = None
"""The name of the workflow to attach to this trace.
This is used to name the trace in the traces dashboard.
"""
Tracing: TypeAlias = Union[Literal["auto"], TracingTracingConfiguration]
|
TracingTracingConfiguration
|
python
|
numba__numba
|
numba/cuda/stubs.py
|
{
"start": 7168,
"end": 7483
}
|
class ____(Stub):
"""
ffs(x)
Returns the position of the first (least significant) bit set to 1 in x,
where the least significant bit position is 1. ffs(0) returns 0.
"""
#-------------------------------------------------------------------------------
# comparison and selection instructions
|
ffs
|
python
|
python-openxml__python-docx
|
src/docx/opc/oxml.py
|
{
"start": 2745,
"end": 3652
}
|
class ____(BaseOxmlElement):
"""`<Default>` element that appears in `[Content_Types].xml` part.
Used to specify a default content type to be applied to any part with the specified extension.
"""
@property
def content_type(self):
"""String held in the ``ContentType`` attribute of this ``<Default>``
element."""
return self.get("ContentType")
@property
def extension(self):
"""String held in the ``Extension`` attribute of this ``<Default>`` element."""
return self.get("Extension")
@staticmethod
def new(ext: str, content_type: str):
"""Return a new ``<Default>`` element with attributes set to parameter values."""
xml = '<Default xmlns="%s"/>' % nsmap["ct"]
default = parse_xml(xml)
default.set("Extension", ext)
default.set("ContentType", content_type)
return default
|
CT_Default
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/queries/near_object/query/sync.py
|
{
"start": 310,
"end": 453
}
|
class ____(
Generic[Properties, References],
_NearObjectQueryExecutor[ConnectionSync, Properties, References],
):
pass
|
_NearObjectQuery
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/save_context_test.py
|
{
"start": 889,
"end": 3191
}
|
class ____(test.TestCase):
def test_multi_thread(self):
self.assertFalse(save_context.in_save_context())
with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):
save_context.get_save_options()
options = save_options.SaveOptions(save_debug_info=True)
with save_context.save_context(options):
self.assertTrue(save_context.in_save_context())
self.assertTrue(save_context.get_save_options().save_debug_info)
entered_context_in_thread = threading.Event()
continue_thread = threading.Event()
def thread_fn():
self.assertFalse(save_context.in_save_context())
with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):
save_context.get_save_options()
options = save_options.SaveOptions(save_debug_info=False)
with save_context.save_context(options):
self.assertTrue(save_context.in_save_context())
# save_debug_info has a different value in this thread.
self.assertFalse(save_context.get_save_options().save_debug_info)
entered_context_in_thread.set()
continue_thread.wait()
self.assertFalse(save_context.in_save_context())
with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):
save_context.get_save_options()
t = threading.Thread(target=thread_fn)
t.start()
entered_context_in_thread.wait()
# Another thread shouldn't affect this thread.
self.assertTrue(save_context.in_save_context())
self.assertTrue(save_context.get_save_options().save_debug_info)
continue_thread.set()
t.join()
# Another thread exiting SaveContext shouldn't affect this thread.
self.assertTrue(save_context.in_save_context())
self.assertTrue(save_context.get_save_options().save_debug_info)
self.assertFalse(save_context.in_save_context())
with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):
save_context.get_save_options()
def test_enter_multiple(self):
options = save_options.SaveOptions()
with self.assertRaisesRegex(ValueError, 'Already in a SaveContext'):
with save_context.save_context(options):
with save_context.save_context(options):
pass
if __name__ == '__main__':
test.main()
|
SaveContextTest
|
python
|
pytorch__pytorch
|
torch/_dynamo/device_interface.py
|
{
"start": 17758,
"end": 17818
}
|
class ____:
multi_processor_count: int
|
CpuDeviceProperties
|
python
|
paramiko__paramiko
|
paramiko/channel.py
|
{
"start": 2041,
"end": 47659
}
|
class ____(ClosingContextManager):
"""
A secure tunnel across an SSH `.Transport`. A Channel is meant to behave
like a socket, and has an API that should be indistinguishable from the
Python socket API.
Because SSH2 has a windowing kind of flow control, if you stop reading data
from a Channel and its buffer fills up, the server will be unable to send
you any more data until you read some of it. (This won't affect other
channels on the same transport -- all channels on a single transport are
flow-controlled independently.) Similarly, if the server isn't reading
data you send, calls to `send` may block, unless you set a timeout. This
is exactly like a normal network socket, so it shouldn't be too surprising.
Instances of this class may be used as context managers.
"""
def __init__(self, chanid):
"""
Create a new channel. The channel is not associated with any
particular session or `.Transport` until the Transport attaches it.
Normally you would only call this method from the constructor of a
subclass of `.Channel`.
:param int chanid:
the ID of this channel, as passed by an existing `.Transport`.
"""
#: Channel ID
self.chanid = chanid
#: Remote channel ID
self.remote_chanid = 0
#: `.Transport` managing this channel
self.transport = None
#: Whether the connection is presently active
self.active = False
self.eof_received = 0
self.eof_sent = 0
self.in_buffer = BufferedPipe()
self.in_stderr_buffer = BufferedPipe()
self.timeout = None
#: Whether the connection has been closed
self.closed = False
self.ultra_debug = False
self.lock = threading.Lock()
self.out_buffer_cv = threading.Condition(self.lock)
self.in_window_size = 0
self.out_window_size = 0
self.in_max_packet_size = 0
self.out_max_packet_size = 0
self.in_window_threshold = 0
self.in_window_sofar = 0
self.status_event = threading.Event()
self._name = str(chanid)
self.logger = util.get_logger("paramiko.transport")
self._pipe = None
self.event = threading.Event()
self.event_ready = False
self.combine_stderr = False
self.exit_status = -1
self.origin_addr = None
def __del__(self):
try:
self.close()
except:
pass
def __repr__(self):
"""
Return a string representation of this object, for debugging.
"""
out = "<paramiko.Channel {}".format(self.chanid)
if self.closed:
out += " (closed)"
elif self.active:
if self.eof_received:
out += " (EOF received)"
if self.eof_sent:
out += " (EOF sent)"
out += " (open) window={}".format(self.out_window_size)
if len(self.in_buffer) > 0:
out += " in-buffer={}".format(len(self.in_buffer))
out += " -> " + repr(self.transport)
out += ">"
return out
@open_only
def get_pty(
self,
term="vt100",
width=80,
height=24,
width_pixels=0,
height_pixels=0,
):
"""
Request a pseudo-terminal from the server. This is usually used right
after creating a client channel, to ask the server to provide some
basic terminal semantics for a shell invoked with `invoke_shell`.
It isn't necessary (or desirable) to call this method if you're going
to execute a single command with `exec_command`.
:param str term: the terminal type to emulate
(for example, ``'vt100'``)
:param int width: width (in characters) of the terminal screen
:param int height: height (in characters) of the terminal screen
:param int width_pixels: width (in pixels) of the terminal screen
:param int height_pixels: height (in pixels) of the terminal screen
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("pty-req")
m.add_boolean(True)
m.add_string(term)
m.add_int(width)
m.add_int(height)
m.add_int(width_pixels)
m.add_int(height_pixels)
m.add_string(bytes())
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
@open_only
def invoke_shell(self):
"""
Request an interactive shell session on this channel. If the server
allows it, the channel will then be directly connected to the stdin,
stdout, and stderr of the shell.
Normally you would call `get_pty` before this, in which case the
shell will operate through the pty, and the channel will be connected
to the stdin and stdout of the pty.
When the shell exits, the channel will be closed and can't be reused.
You must open a new channel if you wish to open another shell.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("shell")
m.add_boolean(True)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
@open_only
def exec_command(self, command):
"""
Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
:param str command: a shell command to execute.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("exec")
m.add_boolean(True)
m.add_string(command)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
@open_only
def invoke_subsystem(self, subsystem):
"""
Request a subsystem on the server (for example, ``sftp``). If the
server allows it, the channel will then be directly connected to the
requested subsystem.
When the subsystem finishes, the channel will be closed and can't be
reused.
:param str subsystem: name of the subsystem being requested.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("subsystem")
m.add_boolean(True)
m.add_string(subsystem)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
@open_only
def resize_pty(self, width=80, height=24, width_pixels=0, height_pixels=0):
"""
Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous `get_pty` call.
:param int width: new width (in characters) of the terminal screen
:param int height: new height (in characters) of the terminal screen
:param int width_pixels: new width (in pixels) of the terminal screen
:param int height_pixels: new height (in pixels) of the terminal screen
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("window-change")
m.add_boolean(False)
m.add_int(width)
m.add_int(height)
m.add_int(width_pixels)
m.add_int(height_pixels)
self.transport._send_user_message(m)
@open_only
def update_environment(self, environment):
"""
Updates this channel's remote shell environment.
.. note::
This operation is additive - i.e. the current environment is not
reset before the given environment variables are set.
.. warning::
Servers may silently reject some environment variables; see the
warning in `set_environment_variable` for details.
:param dict environment:
a dictionary containing the name and respective values to set
:raises:
`.SSHException` -- if any of the environment variables was rejected
by the server or the channel was closed
"""
for name, value in environment.items():
try:
self.set_environment_variable(name, value)
except SSHException as e:
err = 'Failed to set environment variable "{}".'
raise SSHException(err.format(name), e)
@open_only
def set_environment_variable(self, name, value):
"""
Set the value of an environment variable.
.. warning::
The server may reject this request depending on its ``AcceptEnv``
setting; such rejections will fail silently (which is common client
practice for this particular request type). Make sure you
understand your server's configuration before using!
:param str name: name of the environment variable
:param str value: value of the environment variable
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("env")
m.add_boolean(False)
m.add_string(name)
m.add_string(value)
self.transport._send_user_message(m)
def exit_status_ready(self):
"""
Return true if the remote process has exited and returned an exit
status. You may use this to poll the process status if you don't
want to block in `recv_exit_status`. Note that the server may not
return an exit status in some cases (like bad servers).
:return:
``True`` if `recv_exit_status` will return immediately, else
``False``.
.. versionadded:: 1.7.3
"""
return self.closed or self.status_event.is_set()
def recv_exit_status(self):
"""
Return the exit status from the process on the server. This is
mostly useful for retrieving the results of an `exec_command`.
If the command hasn't finished yet, this method will wait until
it does, or until the channel is closed. If no exit status is
provided by the server, -1 is returned.
.. warning::
In some situations, receiving remote output larger than the current
`.Transport` or session's ``window_size`` (e.g. that set by the
``default_window_size`` kwarg for `.Transport.__init__`) will cause
`.recv_exit_status` to hang indefinitely if it is called prior to a
sufficiently large `.Channel.recv` (or if there are no threads
calling `.Channel.recv` in the background).
In these cases, ensuring that `.recv_exit_status` is called *after*
`.Channel.recv` (or, again, using threads) can avoid the hang.
:return: the exit code (as an `int`) of the process on the server.
.. versionadded:: 1.2
"""
self.status_event.wait()
assert self.status_event.is_set()
return self.exit_status
def send_exit_status(self, status):
"""
Send the exit status of an executed command to the client. (This
really only makes sense in server mode.) Many clients expect to
get some sort of status code back from an executed command after
it completes.
:param int status: the exit code of the process
.. versionadded:: 1.2
"""
# in many cases, the channel will not still be open here.
# that's fine.
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("exit-status")
m.add_boolean(False)
m.add_int(status)
self.transport._send_user_message(m)
@open_only
def request_x11(
self,
screen_number=0,
auth_protocol=None,
auth_cookie=None,
single_connection=False,
handler=None,
):
"""
Request an x11 session on this channel. If the server allows it,
further x11 requests can be made from the server to the client,
when an x11 application is run in a shell session.
From :rfc:`4254`::
It is RECOMMENDED that the 'x11 authentication cookie' that is
sent be a fake, random cookie, and that the cookie be checked and
replaced by the real cookie when a connection request is received.
If you omit the auth_cookie, a new secure random 128-bit value will be
generated, used, and returned. You will need to use this value to
verify incoming x11 requests and replace them with the actual local
x11 cookie (which requires some knowledge of the x11 protocol).
If a handler is passed in, the handler is called from another thread
whenever a new x11 connection arrives. The default handler queues up
incoming x11 connections, which may be retrieved using
`.Transport.accept`. The handler's calling signature is::
handler(channel: Channel, (address: str, port: int))
:param int screen_number: the x11 screen number (0, 10, etc.)
:param str auth_protocol:
the name of the X11 authentication method used; if none is given,
``"MIT-MAGIC-COOKIE-1"`` is used
:param str auth_cookie:
hexadecimal string containing the x11 auth cookie; if none is
given, a secure random 128-bit value is generated
:param bool single_connection:
if True, only a single x11 connection will be forwarded (by
default, any number of x11 connections can arrive over this
session)
:param handler:
an optional callable handler to use for incoming X11 connections
:return: the auth_cookie used
"""
if auth_protocol is None:
auth_protocol = "MIT-MAGIC-COOKIE-1"
if auth_cookie is None:
auth_cookie = binascii.hexlify(os.urandom(16))
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("x11-req")
m.add_boolean(True)
m.add_boolean(single_connection)
m.add_string(auth_protocol)
m.add_string(auth_cookie)
m.add_int(screen_number)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
self.transport._set_x11_handler(handler)
return auth_cookie
@open_only
def request_forward_agent(self, handler):
"""
Request for a forward SSH Agent on this channel.
This is only valid for an ssh-agent from OpenSSH !!!
:param handler:
a required callable handler to use for incoming SSH Agent
connections
:return: True if we are ok, else False
(at that time we always return ok)
:raises: SSHException in case of channel problem.
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("auth-agent-req@openssh.com")
m.add_boolean(False)
self.transport._send_user_message(m)
self.transport._set_forward_agent_handler(handler)
return True
def get_transport(self):
"""
Return the `.Transport` associated with this channel.
"""
return self.transport
def set_name(self, name):
"""
Set a name for this channel. Currently it's only used to set the name
of the channel in logfile entries. The name can be fetched with the
`get_name` method.
:param str name: new channel name
"""
self._name = name
def get_name(self):
"""
Get the name of this channel that was previously set by `set_name`.
"""
return self._name
def get_id(self):
"""
Return the `int` ID # for this channel.
The channel ID is unique across a `.Transport` and usually a small
number. It's also the number passed to
`.ServerInterface.check_channel_request` when determining whether to
accept a channel request in server mode.
"""
return self.chanid
def set_combine_stderr(self, combine):
"""
Set whether stderr should be combined into stdout on this channel.
The default is ``False``, but in some cases it may be convenient to
have both streams combined.
If this is ``False``, and `exec_command` is called (or ``invoke_shell``
with no pty), output to stderr will not show up through the `recv`
and `recv_ready` calls. You will have to use `recv_stderr` and
`recv_stderr_ready` to get stderr output.
If this is ``True``, data will never show up via `recv_stderr` or
`recv_stderr_ready`.
:param bool combine:
``True`` if stderr output should be combined into stdout on this
channel.
:return: the previous setting (a `bool`).
.. versionadded:: 1.1
"""
data = bytes()
self.lock.acquire()
try:
old = self.combine_stderr
self.combine_stderr = combine
if combine and not old:
# copy old stderr buffer into primary buffer
data = self.in_stderr_buffer.empty()
finally:
self.lock.release()
if len(data) > 0:
self._feed(data)
return old
# ...socket API...
def settimeout(self, timeout):
"""
Set a timeout on blocking read/write operations. The ``timeout``
argument can be a nonnegative float expressing seconds, or ``None``.
If a float is given, subsequent channel read/write operations will
raise a timeout exception if the timeout period value has elapsed
before the operation has completed. Setting a timeout of ``None``
disables timeouts on socket operations.
``chan.settimeout(0.0)`` is equivalent to ``chan.setblocking(0)``;
``chan.settimeout(None)`` is equivalent to ``chan.setblocking(1)``.
:param float timeout:
seconds to wait for a pending read/write operation before raising
``socket.timeout``, or ``None`` for no timeout.
"""
self.timeout = timeout
def gettimeout(self):
"""
Returns the timeout in seconds (as a float) associated with socket
operations, or ``None`` if no timeout is set. This reflects the last
call to `setblocking` or `settimeout`.
"""
return self.timeout
def setblocking(self, blocking):
"""
Set blocking or non-blocking mode of the channel: if ``blocking`` is 0,
the channel is set to non-blocking mode; otherwise it's set to blocking
mode. Initially all channels are in blocking mode.
In non-blocking mode, if a `recv` call doesn't find any data, or if a
`send` call can't immediately dispose of the data, an error exception
is raised. In blocking mode, the calls block until they can proceed. An
EOF condition is considered "immediate data" for `recv`, so if the
channel is closed in the read direction, it will never block.
``chan.setblocking(0)`` is equivalent to ``chan.settimeout(0)``;
``chan.setblocking(1)`` is equivalent to ``chan.settimeout(None)``.
:param int blocking:
0 to set non-blocking mode; non-0 to set blocking mode.
"""
if blocking:
self.settimeout(None)
else:
self.settimeout(0.0)
def getpeername(self):
"""
Return the address of the remote side of this Channel, if possible.
This simply wraps `.Transport.getpeername`, used to provide enough of a
socket-like interface to allow asyncore to work. (asyncore likes to
call ``'getpeername'``.)
"""
return self.transport.getpeername()
def close(self):
"""
Close the channel. All future read/write operations on the channel
will fail. The remote end will receive no more data (after queued data
is flushed). Channels are automatically closed when their `.Transport`
is closed or when they are garbage collected.
"""
self.lock.acquire()
try:
# only close the pipe when the user explicitly closes the channel.
# otherwise they will get unpleasant surprises. (and do it before
# checking self.closed, since the remote host may have already
# closed the connection.)
if self._pipe is not None:
self._pipe.close()
self._pipe = None
if not self.active or self.closed:
return
msgs = self._close_internal()
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
def recv_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel. A ``False`` result does not mean that the channel has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `recv` call on this channel would immediately return
at least one byte; ``False`` otherwise.
"""
return self.in_buffer.read_ready()
def recv(self, nbytes):
"""
Receive data from the channel. The return value is a string
representing the data received. The maximum amount of data to be
received at once is specified by ``nbytes``. If a string of
length zero is returned, the channel stream has closed.
:param int nbytes: maximum number of bytes to read.
:return: received data, as a `bytes`.
:raises socket.timeout:
if no data is ready before the timeout set by `settimeout`.
"""
try:
out = self.in_buffer.read(nbytes, self.timeout)
except PipeTimeout:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out
def recv_stderr_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel's stderr stream. Only channels using `exec_command` or
`invoke_shell` without a pty will ever have data on the stderr
stream.
:return:
``True`` if a `recv_stderr` call on this channel would immediately
return at least one byte; ``False`` otherwise.
.. versionadded:: 1.1
"""
return self.in_stderr_buffer.read_ready()
def recv_stderr(self, nbytes):
"""
Receive data from the channel's stderr stream. Only channels using
`exec_command` or `invoke_shell` without a pty will ever have data
on the stderr stream. The return value is a string representing the
data received. The maximum amount of data to be received at once is
specified by ``nbytes``. If a string of length zero is returned, the
channel stream has closed.
:param int nbytes: maximum number of bytes to read.
:return: received data as a `bytes`
:raises socket.timeout: if no data is ready before the timeout set by
`settimeout`.
.. versionadded:: 1.1
"""
try:
out = self.in_stderr_buffer.read(nbytes, self.timeout)
except PipeTimeout:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out
def send_ready(self):
"""
Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a `send` call will succeed immediately and return
the number of bytes actually written.
:return:
``True`` if a `send` call on this channel would immediately succeed
or fail
"""
self.lock.acquire()
try:
if self.closed or self.eof_sent:
return True
return self.out_window_size > 0
finally:
self.lock.release()
def send(self, s):
"""
Send data to the channel. Returns the number of bytes sent, or 0 if
the channel stream is closed. Applications are responsible for
checking that all data has been sent: if only some of the data was
transmitted, the application needs to attempt delivery of the remaining
data.
:param bytes s: data to send
:return: number of bytes actually sent, as an `int`
:raises socket.timeout: if no data could be sent before the timeout set
by `settimeout`.
"""
m = Message()
m.add_byte(cMSG_CHANNEL_DATA)
m.add_int(self.remote_chanid)
return self._send(s, m)
def send_stderr(self, s):
"""
Send data to the channel on the "stderr" stream. This is normally
only used by servers to send output from shell commands -- clients
won't use this. Returns the number of bytes sent, or 0 if the channel
stream is closed. Applications are responsible for checking that all
data has been sent: if only some of the data was transmitted, the
application needs to attempt delivery of the remaining data.
:param bytes s: data to send.
:return: number of bytes actually sent, as an `int`.
:raises socket.timeout:
if no data could be sent before the timeout set by `settimeout`.
.. versionadded:: 1.1
"""
m = Message()
m.add_byte(cMSG_CHANNEL_EXTENDED_DATA)
m.add_int(self.remote_chanid)
m.add_int(1)
return self._send(s, m)
def sendall(self, s):
"""
Send data to the channel, without allowing partial results. Unlike
`send`, this method continues to send data from the given string until
either all data has been sent or an error occurs. Nothing is returned.
:param bytes s: data to send.
:raises socket.timeout:
if sending stalled for longer than the timeout set by `settimeout`.
:raises socket.error:
if an error occurred before the entire string was sent.
.. note::
If the channel is closed while only part of the data has been
sent, there is no way to determine how much data (if any) was sent.
This is irritating, but identically follows Python's API.
"""
while s:
sent = self.send(s)
s = s[sent:]
return None
def sendall_stderr(self, s):
"""
Send data to the channel's "stderr" stream, without allowing partial
results. Unlike `send_stderr`, this method continues to send data
from the given bytestring until all data has been sent or an error
occurs. Nothing is returned.
:param bytes s: data to send to the client as "stderr" output.
:raises socket.timeout:
if sending stalled for longer than the timeout set by `settimeout`.
:raises socket.error:
if an error occurred before the entire string was sent.
.. versionadded:: 1.1
"""
while s:
sent = self.send_stderr(s)
s = s[sent:]
return None
def makefile(self, *params):
"""
Return a file-like object associated with this channel. The optional
``mode`` and ``bufsize`` arguments are interpreted the same way as by
the built-in ``file()`` function in Python.
:return: `.ChannelFile` object which can be used for Python file I/O.
"""
return ChannelFile(*([self] + list(params)))
def makefile_stderr(self, *params):
"""
Return a file-like object associated with this channel's stderr
stream. Only channels using `exec_command` or `invoke_shell`
without a pty will ever have data on the stderr stream.
The optional ``mode`` and ``bufsize`` arguments are interpreted the
same way as by the built-in ``file()`` function in Python. For a
client, it only makes sense to open this file for reading. For a
server, it only makes sense to open this file for writing.
:returns:
`.ChannelStderrFile` object which can be used for Python file I/O.
.. versionadded:: 1.1
"""
return ChannelStderrFile(*([self] + list(params)))
def makefile_stdin(self, *params):
"""
Return a file-like object associated with this channel's stdin
stream.
The optional ``mode`` and ``bufsize`` arguments are interpreted the
same way as by the built-in ``file()`` function in Python. For a
client, it only makes sense to open this file for writing. For a
server, it only makes sense to open this file for reading.
:returns:
`.ChannelStdinFile` object which can be used for Python file I/O.
.. versionadded:: 2.6
"""
return ChannelStdinFile(*([self] + list(params)))
def fileno(self):
"""
Returns an OS-level file descriptor which can be used for polling, but
but not for reading or writing. This is primarily to allow Python's
``select`` module to work.
The first time ``fileno`` is called on a channel, a pipe is created to
simulate real OS-level file descriptor (FD) behavior. Because of this,
two OS-level FDs are created, which will use up FDs faster than normal.
(You won't notice this effect unless you have hundreds of channels
open at the same time.)
:return: an OS-level file descriptor (`int`)
.. warning::
This method causes channel reads to be slightly less efficient.
"""
self.lock.acquire()
try:
if self._pipe is not None:
return self._pipe.fileno()
# create the pipe and feed in any existing data
self._pipe = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(self._pipe)
self.in_buffer.set_event(p1)
self.in_stderr_buffer.set_event(p2)
return self._pipe.fileno()
finally:
self.lock.release()
def shutdown(self, how):
"""
Shut down one or both halves of the connection. If ``how`` is 0,
further receives are disallowed. If ``how`` is 1, further sends
are disallowed. If ``how`` is 2, further sends and receives are
disallowed. This closes the stream in one or both directions.
:param int how:
0 (stop receiving), 1 (stop sending), or 2 (stop receiving and
sending).
"""
if (how == 0) or (how == 2):
# feign "read" shutdown
self.eof_received = 1
if (how == 1) or (how == 2):
self.lock.acquire()
try:
m = self._send_eof()
finally:
self.lock.release()
if m is not None and self.transport is not None:
self.transport._send_user_message(m)
def shutdown_read(self):
"""
Shutdown the receiving side of this socket, closing the stream in
the incoming direction. After this call, future reads on this
channel will fail instantly. This is a convenience method, equivalent
to ``shutdown(0)``, for people who don't make it a habit to
memorize unix constants from the 1970s.
.. versionadded:: 1.2
"""
self.shutdown(0)
def shutdown_write(self):
"""
Shutdown the sending side of this socket, closing the stream in
the outgoing direction. After this call, future writes on this
channel will fail instantly. This is a convenience method, equivalent
to ``shutdown(1)``, for people who don't make it a habit to
memorize unix constants from the 1970s.
.. versionadded:: 1.2
"""
self.shutdown(1)
@property
def _closed(self):
# Concession to Python 3's socket API, which has a private ._closed
# attribute instead of a semipublic .closed attribute.
return self.closed
# ...calls from Transport
def _set_transport(self, transport):
self.transport = transport
self.logger = util.get_logger(self.transport.get_log_channel())
def _set_window(self, window_size, max_packet_size):
self.in_window_size = window_size
self.in_max_packet_size = max_packet_size
# threshold of bytes we receive before we bother to send
# a window update
self.in_window_threshold = window_size // 10
self.in_window_sofar = 0
self._log(DEBUG, "Max packet in: {} bytes".format(max_packet_size))
def _set_remote_channel(self, chanid, window_size, max_packet_size):
self.remote_chanid = chanid
self.out_window_size = window_size
self.out_max_packet_size = self.transport._sanitize_packet_size(
max_packet_size
)
self.active = 1
self._log(
DEBUG, "Max packet out: {} bytes".format(self.out_max_packet_size)
)
def _request_success(self, m):
self._log(DEBUG, "Sesch channel {} request ok".format(self.chanid))
self.event_ready = True
self.event.set()
return
def _request_failed(self, m):
self.lock.acquire()
try:
msgs = self._close_internal()
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
def _feed(self, m):
if isinstance(m, bytes):
# passed from _feed_extended
s = m
else:
s = m.get_binary()
self.in_buffer.feed(s)
def _feed_extended(self, m):
code = m.get_int()
s = m.get_binary()
if code != 1:
self._log(
ERROR, "unknown extended_data type {}; discarding".format(code)
)
return
if self.combine_stderr:
self._feed(s)
else:
self.in_stderr_buffer.feed(s)
def _window_adjust(self, m):
nbytes = m.get_int()
self.lock.acquire()
try:
if self.ultra_debug:
self._log(DEBUG, "window up {}".format(nbytes))
self.out_window_size += nbytes
self.out_buffer_cv.notify_all()
finally:
self.lock.release()
def _handle_request(self, m):
key = m.get_text()
want_reply = m.get_boolean()
server = self.transport.server_object
ok = False
if key == "exit-status":
self.exit_status = m.get_int()
self.status_event.set()
ok = True
elif key == "xon-xoff":
# ignore
ok = True
elif key == "pty-req":
term = m.get_string()
width = m.get_int()
height = m.get_int()
pixelwidth = m.get_int()
pixelheight = m.get_int()
modes = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_pty_request(
self, term, width, height, pixelwidth, pixelheight, modes
)
elif key == "shell":
if server is None:
ok = False
else:
ok = server.check_channel_shell_request(self)
elif key == "env":
name = m.get_string()
value = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_env_request(self, name, value)
elif key == "exec":
cmd = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_exec_request(self, cmd)
elif key == "subsystem":
name = m.get_text()
if server is None:
ok = False
else:
ok = server.check_channel_subsystem_request(self, name)
elif key == "window-change":
width = m.get_int()
height = m.get_int()
pixelwidth = m.get_int()
pixelheight = m.get_int()
if server is None:
ok = False
else:
ok = server.check_channel_window_change_request(
self, width, height, pixelwidth, pixelheight
)
elif key == "x11-req":
single_connection = m.get_boolean()
auth_proto = m.get_text()
auth_cookie = m.get_binary()
screen_number = m.get_int()
if server is None:
ok = False
else:
ok = server.check_channel_x11_request(
self,
single_connection,
auth_proto,
auth_cookie,
screen_number,
)
elif key == "auth-agent-req@openssh.com":
if server is None:
ok = False
else:
ok = server.check_channel_forward_agent_request(self)
else:
self._log(DEBUG, 'Unhandled channel request "{}"'.format(key))
ok = False
if want_reply:
m = Message()
if ok:
m.add_byte(cMSG_CHANNEL_SUCCESS)
else:
m.add_byte(cMSG_CHANNEL_FAILURE)
m.add_int(self.remote_chanid)
self.transport._send_user_message(m)
def _handle_eof(self, m):
self.lock.acquire()
try:
if not self.eof_received:
self.eof_received = True
self.in_buffer.close()
self.in_stderr_buffer.close()
if self._pipe is not None:
self._pipe.set_forever()
finally:
self.lock.release()
self._log(DEBUG, "EOF received ({})".format(self._name))
def _handle_close(self, m):
self.lock.acquire()
try:
msgs = self._close_internal()
self.transport._unlink_channel(self.chanid)
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
# ...internals...
def _send(self, s, m):
size = len(s)
self.lock.acquire()
try:
if self.closed:
# this doesn't seem useful, but it is the documented behavior
# of Socket
raise socket.error("Socket is closed")
size = self._wait_for_send_window(size)
if size == 0:
# eof or similar
return 0
m.add_string(s[:size])
finally:
self.lock.release()
# Note: We release self.lock before calling _send_user_message.
# Otherwise, we can deadlock during re-keying.
self.transport._send_user_message(m)
return size
def _log(self, level, msg, *args):
self.logger.log(level, "[chan " + self._name + "] " + msg, *args)
def _event_pending(self):
self.event.clear()
self.event_ready = False
def _wait_for_event(self):
self.event.wait()
assert self.event.is_set()
if self.event_ready:
return
e = self.transport.get_exception()
if e is None:
e = SSHException("Channel closed.")
raise e
def _set_closed(self):
# you are holding the lock.
self.closed = True
self.in_buffer.close()
self.in_stderr_buffer.close()
self.out_buffer_cv.notify_all()
# Notify any waiters that we are closed
self.event.set()
self.status_event.set()
if self._pipe is not None:
self._pipe.set_forever()
def _send_eof(self):
# you are holding the lock.
if self.eof_sent:
return None
m = Message()
m.add_byte(cMSG_CHANNEL_EOF)
m.add_int(self.remote_chanid)
self.eof_sent = True
self._log(DEBUG, "EOF sent ({})".format(self._name))
return m
def _close_internal(self):
# you are holding the lock.
if not self.active or self.closed:
return None, None
m1 = self._send_eof()
m2 = Message()
m2.add_byte(cMSG_CHANNEL_CLOSE)
m2.add_int(self.remote_chanid)
self._set_closed()
# can't unlink from the Transport yet -- the remote side may still
# try to send meta-data (exit-status, etc)
return m1, m2
def _unlink(self):
# server connection could die before we become active:
# still signal the close!
if self.closed:
return
self.lock.acquire()
try:
self._set_closed()
self.transport._unlink_channel(self.chanid)
finally:
self.lock.release()
def _check_add_window(self, n):
self.lock.acquire()
try:
if self.closed or self.eof_received or not self.active:
return 0
if self.ultra_debug:
self._log(DEBUG, "addwindow {}".format(n))
self.in_window_sofar += n
if self.in_window_sofar <= self.in_window_threshold:
return 0
if self.ultra_debug:
self._log(
DEBUG, "addwindow send {}".format(self.in_window_sofar)
)
out = self.in_window_sofar
self.in_window_sofar = 0
return out
finally:
self.lock.release()
def _wait_for_send_window(self, size):
"""
(You are already holding the lock.)
Wait for the send window to open up, and allocate up to ``size`` bytes
for transmission. If no space opens up before the timeout, a timeout
exception is raised. Returns the number of bytes available to send
(may be less than requested).
"""
# you are already holding the lock
if self.closed or self.eof_sent:
return 0
if self.out_window_size == 0:
# should we block?
if self.timeout == 0.0:
raise socket.timeout()
# loop here in case we get woken up but a different thread has
# filled the buffer
timeout = self.timeout
while self.out_window_size == 0:
if self.closed or self.eof_sent:
return 0
then = time.time()
self.out_buffer_cv.wait(timeout)
if timeout is not None:
timeout -= time.time() - then
if timeout <= 0.0:
raise socket.timeout()
# we have some window to squeeze into
if self.closed or self.eof_sent:
return 0
if self.out_window_size < size:
size = self.out_window_size
if self.out_max_packet_size - 64 < size:
size = self.out_max_packet_size - 64
self.out_window_size -= size
if self.ultra_debug:
self._log(DEBUG, "window down to {}".format(self.out_window_size))
return size
|
Channel
|
python
|
facebook__pyre-check
|
tools/upgrade/commands/command.py
|
{
"start": 756,
"end": 1553
}
|
class ____:
comment: Optional[str]
max_line_length: Optional[int]
truncate: bool
unsafe: bool
force_format_unsuppressed: bool
lint: bool
no_commit: bool
should_clean: bool
@staticmethod
def from_arguments(arguments: argparse.Namespace) -> "CommandArguments":
return CommandArguments(
comment=arguments.comment,
max_line_length=arguments.max_line_length,
truncate=arguments.truncate,
unsafe=getattr(arguments, "unsafe", False),
force_format_unsuppressed=getattr(
arguments, "force_format_unsuppressed", False
),
lint=arguments.lint,
no_commit=arguments.no_commit,
should_clean=arguments.should_clean,
)
|
CommandArguments
|
python
|
encode__django-rest-framework
|
rest_framework/versioning.py
|
{
"start": 1931,
"end": 3321
}
|
class ____(BaseVersioning):
"""
To the client this is the same style as `NamespaceVersioning`.
The difference is in the backend - this implementation uses
Django's URL keyword arguments to determine the version.
An example URL conf for two views that accept two different versions.
urlpatterns = [
re_path(r'^(?P<version>[v1|v2]+)/users/$', users_list, name='users-list'),
re_path(r'^(?P<version>[v1|v2]+)/users/(?P<pk>[0-9]+)/$', users_detail, name='users-detail')
]
GET /1.0/something/ HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in URL path.')
def determine_version(self, request, *args, **kwargs):
version = kwargs.get(self.version_param, self.default_version)
if version is None:
version = self.default_version
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
if request.version is not None:
kwargs = {
self.version_param: request.version,
**(kwargs or {})
}
return super().reverse(
viewname, args, kwargs, request, format, **extra
)
|
URLPathVersioning
|
python
|
scrapy__scrapy
|
scrapy/extensions/debug.py
|
{
"start": 1983,
"end": 2367
}
|
class ____:
def __init__(self) -> None:
# win32 platforms don't support SIGUSR signals
with contextlib.suppress(AttributeError):
signal.signal(signal.SIGUSR2, self._enter_debugger) # type: ignore[attr-defined]
def _enter_debugger(self, signum: int, frame: FrameType | None) -> None:
assert frame
Pdb().set_trace(frame.f_back)
|
Debugger
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/assertsql.py
|
{
"start": 13652,
"end": 13850
}
|
class ____(EachOf):
def __init__(self, condition, rules, else_rules):
if condition:
super().__init__(*rules)
else:
super().__init__(*else_rules)
|
Conditional
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py
|
{
"start": 9752,
"end": 11237
}
|
class ____(Benchmark):
r"""
DeflectedCorrugatedSpring objective function.
This class defines the Deflected Corrugated Spring [1]_ function global
optimization problem. This is a multimodal minimization problem defined as
follows:
.. math::
f_{\text{DeflectedCorrugatedSpring}}(x) = 0.1\sum_{i=1}^n \left[ (x_i -
\alpha)^2 - \cos \left( K \sqrt {\sum_{i=1}^n (x_i - \alpha)^2}
\right ) \right ]
Where, in this exercise, :math:`K = 5` and :math:`\alpha = 5`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 2\alpha]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -1` for :math:`x_i = \alpha` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: website has a different equation to the gavana codebase. The function
below is different to the equation above. Also, the global minimum is
wrong.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
alpha = 5.0
self._bounds = list(zip([0] * self.N, [2 * alpha] * self.N))
self.global_optimum = [[alpha for _ in range(self.N)]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
K, alpha = 5.0, 5.0
return (-cos(K * sqrt(sum((x - alpha) ** 2)))
+ 0.1 * sum((x - alpha) ** 2))
|
DeflectedCorrugatedSpring
|
python
|
getsentry__sentry
|
src/sentry/db/models/base.py
|
{
"start": 17047,
"end": 20284
}
|
class ____(SiloLimit):
def __init__(
self,
*modes: SiloMode,
read_only: SiloMode | Iterable[SiloMode] = (),
) -> None:
super().__init__(*modes)
self.read_only = frozenset([read_only] if isinstance(read_only, SiloMode) else read_only)
@staticmethod
def _recover_model_name(obj: Any) -> str | None:
# obj may be a model, manager, or queryset
if isinstance(obj, Model):
return type(obj).__name__
model_attr = getattr(obj, "model", None)
if model_attr and isinstance(model_attr, type) and issubclass(model_attr, Model):
return model_attr.__name__
return None
def handle_when_unavailable(
self,
original_method: Callable[..., Any],
current_mode: SiloMode,
available_modes: Iterable[SiloMode],
) -> Callable[..., Any]:
def handle(obj: Any, *args: Any, **kwargs: Any) -> None:
model_name = self._recover_model_name(obj)
method_name = (model_name + "." if model_name else "") + original_method.__name__
mode_str = ", ".join(str(m) for m in available_modes)
message = (
f"Called `{method_name}` on server in {current_mode} mode. "
f"{model_name or 'The model'} is available only in: {mode_str}"
)
raise self.AvailabilityError(message)
return handle
def _check_type(self, model_class: object) -> None:
# split out so mypy doesn't erroneously narrow
if not (isinstance(model_class, type) and issubclass(model_class, models.Model)):
raise TypeError("`@ModelSiloLimit ` must decorate a Model class")
def __call__(self, model_class: type[ModelClass]) -> type[ModelClass]:
self._check_type(model_class)
setattr(
model_class,
"objects",
create_silo_limited_copy(getattr(model_class, "objects"), self),
)
# On the model (not manager) class itself, find all methods that are tagged
# with the `alters_data` meta-attribute and replace them with overrides.
for model_attr_name in dir(model_class):
model_attr = getattr(model_class, model_attr_name)
if callable(model_attr) and getattr(model_attr, "alters_data", False):
override = self.create_override(model_attr)
override.alters_data = True # type: ignore[attr-defined]
# We have to resort to monkey-patching here. Dynamically extending
# and replacing the model class is not an option, because that would
# trigger hooks in Django's ModelBase metaclass a second time.
setattr(model_class, model_attr_name, override)
getattr(model_class, "_meta").silo_limit = self
return model_class
control_silo_model = ModelSiloLimit(SiloMode.CONTROL)
"""
Apply to models that are shared by multiple organizations or
require strong consistency with other Control silo resources.
"""
region_silo_model = ModelSiloLimit(SiloMode.REGION)
"""
Apply to models that belong to a single organization or
require strong consistency with other Region silo resources.
"""
|
ModelSiloLimit
|
python
|
Textualize__textual
|
src/textual/canvas.py
|
{
"start": 3980,
"end": 8420
}
|
class ____:
"""A character canvas."""
def __init__(self, width: int, height: int) -> None:
"""
Args:
width: Width of the canvas (in cells).
height Height of the canvas (in cells).
"""
self._width = width
self._height = height
blank_line = " " * width
array_type_code = "w" if sys.version_info >= (3, 13) else "u"
self.lines: list[array[str]] = [
array(array_type_code, blank_line) for _ in range(height)
]
self.box: list[defaultdict[int, Quad]] = [
defaultdict(lambda: (0, 0, 0, 0)) for _ in range(height)
]
self.spans: list[list[_Span]] = [[] for _ in range(height)]
@property
def width(self) -> int:
"""The canvas width."""
return self._width
@property
def height(self) -> int:
"""The canvas height."""
return self._height
def x_range(self, start: int, end: int) -> range:
"""Range of x values, clipped to the canvas dimensions.
Args:
start: Start index.
end: End index.
Returns:
A range object.
"""
return range(
clamp(start, 0, self._width),
clamp(end, 0, self._width),
)
def y_range(self, start: int, end: int) -> range:
"""Range of y values, clipped to the canvas dimensions.
Args:
start: Start index.
end: End index.
Returns:
A range object.
"""
return range(
clamp(start, 0, self._height),
clamp(end, 0, self._height),
)
def render(
self, primitives: Sequence[Primitive], base_style: Style
) -> StripRenderable:
"""Render the canvas.
Args:
primitives: A sequence of primitives.
base_style: The base style of the canvas.
Returns:
A Rich renderable for the canvas.
"""
for primitive in primitives:
primitive.render(self)
get_box = BOX_CHARACTERS.__getitem__
for box, line in zip(self.box, self.lines):
for offset, quad in box.items():
line[offset] = get_box(quad)
width = self._width
span_sort_key = itemgetter(0, 1)
strips: list[Strip] = []
color = (
Color.from_rich_color(base_style.bgcolor)
if base_style.bgcolor
else Color.parse("transparent")
)
_Segment = Segment
for raw_spans, line in zip(self.spans, self.lines):
text = line.tounicode()
if raw_spans:
segments: list[Segment] = []
colors = [color] + [span.color for span in raw_spans]
spans = [
(0, False, 0),
*(
(span.start, False, index)
for index, span in enumerate(raw_spans, 1)
),
*(
(span.end, True, index)
for index, span in enumerate(raw_spans, 1)
),
(width, True, 0),
]
spans.sort(key=span_sort_key)
color_indices: set[int] = set()
color_remove = color_indices.discard
color_add = color_indices.add
for (offset, leaving, style_id), (next_offset, _, _) in zip(
spans, spans[1:]
):
if leaving:
color_remove(style_id)
else:
color_add(style_id)
if next_offset > offset:
segments.append(
_Segment(
text[offset:next_offset],
base_style
+ Style.from_color(
colors[
max(color_indices) if color_indices else 0
].rich_color
),
)
)
strips.append(Strip(segments, width))
else:
strips.append(Strip([_Segment(text, base_style)], width))
return StripRenderable(strips, width)
|
Canvas
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/transfers/local_to_s3.py
|
{
"start": 1079,
"end": 4213
}
|
class ____(BaseOperator):
"""
Uploads a file from a local filesystem to Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LocalFilesystemToS3Operator`
:param filename: Path to the local file. Path can be either absolute
(e.g. /path/to/file.ext) or relative (e.g. ../../foo/*/*.csv). (templated)
:param dest_key: The key of the object to copy to. (templated)
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit `dest_bucket`.
:param dest_bucket: Name of the S3 bucket to where the object is copied. (templated)
:param aws_conn_id: Connection id of the S3 connection to use
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used,
but SSL certificates will not be
verified.
- path/to/cert/bundle.pem: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
"""
template_fields: Sequence[str] = ("filename", "dest_key", "dest_bucket")
def __init__(
self,
*,
filename: str,
dest_key: str,
dest_bucket: str | None = None,
aws_conn_id: str | None = "aws_default",
verify: str | bool | None = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.filename = filename
self.dest_key = dest_key
self.dest_bucket = dest_bucket
self.aws_conn_id = aws_conn_id
self.verify = verify
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
s3_bucket, s3_key = s3_hook.get_s3_bucket_key(
self.dest_bucket, self.dest_key, "dest_bucket", "dest_key"
)
s3_hook.load_file(
self.filename,
s3_key,
s3_bucket,
self.replace,
self.encrypt,
self.gzip,
self.acl_policy,
)
|
LocalFilesystemToS3Operator
|
python
|
fluentpython__example-code
|
13-op-overloading/vector_v8.py
|
{
"start": 6646,
"end": 10087
}
|
class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
# BEGIN VECTOR_V8_EQ
def __eq__(self, other):
if isinstance(other, Vector): # <1>
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
else:
return NotImplemented # <2>
# END VECTOR_V8_EQ
def __hash__(self):
hashes = (hash(x) for x in self)
return functools.reduce(operator.xor, hashes, 0)
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __neg__(self):
return Vector(-x for x in self)
def __pos__(self):
return Vector(self)
def __bool__(self):
return bool(abs(self))
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral):
return self._components[index]
else:
msg = '{.__name__} indices must be integers'
raise TypeError(msg.format(cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def angle(self, n):
r = math.sqrt(sum(x * x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'): # hyperspherical coordinates
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)],
self.angles())
outer_fmt = '<{}>'
else:
coords = self
outer_fmt = '({})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
def __add__(self, other):
try:
pairs = itertools.zip_longest(self, other, fillvalue=0.0)
return Vector(a + b for a, b in pairs)
except TypeError:
return NotImplemented
def __radd__(self, other):
return self + other
def __mul__(self, scalar):
if isinstance(scalar, numbers.Real):
return Vector(n * scalar for n in self)
else:
return NotImplemented
def __rmul__(self, scalar):
return self * scalar
|
Vector
|
python
|
tensorflow__tensorflow
|
tensorflow/python/profiler/profile_context.py
|
{
"start": 4136,
"end": 13577
}
|
class ____(object):
"""A Context that captures RunMetadata and performs profiling.
```python
# Trace steps 100~200, profile at [150, 200] and dump profile at 200.
with profile_context.ProfileContext('/tmp/train_dir',
trace_steps=range(100, 200, 3),
dump_steps=[200]) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.add_auto_profiling('op', opts, [150, 200])
train_loop().
# Tracing only.
with profile_context.tfprof.ProfileContext('/tmp/train_dir') as pctx:
# Run train/eval loop for at least few hundred steps. Profiles will be
# dumped to train_dir. Use web UI or command line to do profiling.
train_loop().
# When session object is available, do explicit trace, profile and dump.
with profile_context.ProfileContext('/tmp/train_dir',
trace_steps=[],
dump_steps=[]) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.trace_next_step()
_ = session.run(train_op)
pctx.profiler.profile_operations(options=opts)
```
Args:
profile_dir: Directory to store profiles.
trace_steps: A list of session run steps to trace. If None, use
pre-defined steps.
dump_steps: A list of steps to dump the profile to `profile_dir`. If None,
use pre-defined steps.
enabled: If false, everything is disabled with minimal overhead. It allows
user to only enable profiling when needed.
debug: If true, also dumps the raw trace RunMetadata text file to
profile_dir. And print debugging message. Useful for bug report.
"""
def __init__(self,
profile_dir,
trace_steps=None,
dump_steps=None,
enabled=True,
debug=False):
self._enabled = enabled
if not self._enabled:
return
self._debug = debug
if not profile_dir:
raise ValueError('Must have a directory for profile.\n')
self._profiler_dir = profile_dir
if trace_steps is None:
self._trace_steps = set()
self._auto_tracing = True
else:
if len(trace_steps) > MAX_TRACED_STEPS:
raise ValueError('Only support tracing up to 100 steps.\n')
self._trace_steps = set(trace_steps[:])
self._auto_tracing = False
if dump_steps is None:
self._dump_steps = set([MAX_TRACED_STEPS])
else:
self._dump_steps = set(dump_steps[:])
self._rng = random.Random(111)
self._fetched = set()
self._slow_path_steps = self._dump_steps | self._trace_steps
self._trace_next_step = False
self._dump_next_step = False
self._step = 0
self._traced_steps = 0
self._auto_profiles = []
self._profiler = None
self._views = {}
self._lock = threading.Lock()
def get_profiles(self, cmd):
"""Returns profiling results for each step at which `cmd` was run.
Args:
cmd: string, profiling command used in an `add_auto_profiling` call.
Returns:
dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which
the profiling command was run. Values are the outputs of profiling.
For "code" and "op" commands this will be a `MultiGraphNodeProto`, for
"scope" and "graph" commands this will be a `GraphNodeProto.
Raises:
ValueError: if `cmd` was never run (either because no session.run call was
made or because there was no `add_auto_profiling` call with the specified
`cmd`.
"""
if cmd not in self._views:
raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))
return self._views[cmd]
def add_auto_profiling(self, cmd, options, profile_steps):
"""Traces and profiles at some session run steps.
Args:
cmd: The profiling commands. (i.e. scope, op, python, graph)
options: The profiling options.
profile_steps: A list/set of integers. The profiling command and options
will be run automatically at these integer steps. Each step is
a session.run.
"""
if not self._enabled:
return
self._auto_profiles.append((cmd, options, profile_steps[:]))
self._slow_path_steps |= set(profile_steps)
self._trace_steps |= set(profile_steps)
@property
def profiler(self):
"""Returns the current profiler object."""
if not self._enabled:
return None
if not self._profiler:
self._profiler = model_analyzer.Profiler(ops.get_default_graph())
return self._profiler
def trace_next_step(self):
"""Enables tracing and adds traces to profiler at next step."""
if not self._enabled:
return
self._trace_next_step = True
self._slow_path_steps.add(self._step)
def dump_next_step(self):
"""Enable tracing and dump profiles at next step."""
if not self._enabled:
return
self._dump_next_step = True
self._slow_path_steps.add(self._step)
def _is_fast_path(self, step):
if step in self._slow_path_steps:
return False
# When user doesn't set the tracing steps explicitly, auto decide it.
if (self._auto_tracing and step > WARMUP_STEPS and
self._traced_steps <= MAX_TRACED_STEPS):
return False
return True
def _should_trace(self, step, graph, fetches):
"""Whether should do tracing at current step."""
if self._traced_steps > MAX_TRACED_STEPS:
return False
# Check user-set tracing steps.
if step in self._trace_steps or self._trace_next_step:
self._traced_steps += 1
return True
# If no user-set tracing steps set and passes warm up steps, auto trace.
if self._auto_tracing and step > WARMUP_STEPS:
# If the fetches have not been seen before, trace it.
with graph.as_default():
fetch_names = [f.name for f in
session._FetchMapper.for_fetch(fetches).unique_fetches()] # pylint: disable=protected-access
fetch_name = '-'.join(sorted(fetch_names))
if self._debug:
sys.stderr.write('debug: trace fetches: %s\n' % fetch_name)
if fetch_name not in self._fetched:
self._fetched.add(fetch_name)
self._traced_steps += 1
return True
# If the trace coverage is low, does some random tracing.
if (self.profiler._coverage < 0.5 and step < MAX_TRACED_STEPS and # pylint: disable=protected-access
self._rng.randint(0, 10) < 2):
self._traced_steps += 1
return True
return False
def _maybe_dump(self, step):
"""Maybe dump the profile file."""
if not (step in self._dump_steps or self._dump_next_step):
return
if self._debug:
sys.stderr.write('debug: dumping file at step: %d\n' % step)
gfile.MakeDirs(self._profiler_dir)
filename = os.path.join(compat.as_bytes(self._profiler_dir),
compat.as_bytes('profile_%d' % step))
self.profiler._write_profile(filename) # pylint: disable=protected-access
def _dump_file(self, pb, basename):
gfile.MakeDirs(self._profiler_dir)
with gfile.Open(os.path.join(self._profiler_dir, basename), 'w') as f:
f.write('%s' % pb)
@contextlib.contextmanager
def _new_step(self):
acquired = self._lock.acquire(False) # pylint: disable=assignment-from-no-return
yield (self._step, acquired)
self._step += 1
self._trace_next_step = False
self._dump_next_step = False
if acquired:
self._lock.release()
def _profile_candidates(self):
to_profile = []
for auto_prof in self._auto_profiles:
_, _, prof_steps = auto_prof
if self._step in prof_steps:
to_profile.append(auto_prof)
return to_profile
def __enter__(self):
if self._enabled:
self.old_run = getattr(session.BaseSession, 'run', None)
self.old_init = getattr(session.BaseSession, '__init__', None)
if not self.old_run:
raise errors.InternalError(None, None, 'BaseSession misses run method.')
elif not self.old_init:
raise errors.InternalError(None, None,
'BaseSession misses __init__ method.')
elif getattr(session.BaseSession, '_profiler_run_internal', None):
raise errors.InternalError(None, None,
'Already in context or context not cleaned.')
elif getattr(session.BaseSession, '_profiler_init_internal', None):
raise errors.InternalError(None, None,
'Already in context or context not cleaned.')
else:
setattr(session.BaseSession, 'run', _profiled_run)
setattr(session.BaseSession, '__init__', _profiled_init)
setattr(session.BaseSession, '_profiler_run_internal', self.old_run)
setattr(session.BaseSession, '_profiler_init_internal', self.old_init)
setattr(session.BaseSession, 'profile_context', self)
return self
else:
return self
def __exit__(self, exec_type, exec_value, exec_tb):
if not self._enabled:
return
print_mdl.DeleteProfiler()
setattr(session.BaseSession, 'run', self.old_run)
setattr(session.BaseSession, '__init__', self.old_init)
setattr(session.BaseSession, '_profiler_run_internal', None)
setattr(session.BaseSession, '_profiler_init_internal', None)
setattr(session.BaseSession, 'profile_context', None)
|
ProfileContext
|
python
|
Textualize__textual
|
tests/selection_list/test_over_wide_selections.py
|
{
"start": 183,
"end": 821
}
|
class ____(App[None]):
"""Test selection list application."""
CSS = """
OptionList {
width: 20;
}
"""
def compose(self) -> ComposeResult:
yield SelectionList[int](*[(f"{n} ", n) for n in range(10)])
async def test_over_wide_options() -> None:
"""Options wider than the widget should not be an issue."""
async with SelectionListApp().run_test() as pilot:
assert pilot.app.query_one(SelectionList).highlighted == 0
await pilot.pause()
assert pilot.app.query_one(SelectionList).highlighted == 0
if __name__ == "__main__":
SelectionListApp().run()
|
SelectionListApp
|
python
|
davidhalter__jedi
|
jedi/plugins/stdlib.py
|
{
"start": 23779,
"end": 24844
}
|
class ____(ValueWrapper):
def __init__(self, instance, args_value_set):
super().__init__(instance)
self._args_value_set = args_value_set
@repack_with_argument_clinic('item, /')
def py__call__(self, item_value_set):
value_set = NO_VALUES
for args_value in self._args_value_set:
lazy_values = list(args_value.py__iter__())
if len(lazy_values) == 1:
# TODO we need to add the contextualized value.
value_set |= item_value_set.get_item(lazy_values[0].infer(), None)
else:
value_set |= ValueSet([iterable.FakeList(
self._wrapped_value.inference_state,
[
LazyKnownValues(item_value_set.get_item(lazy_value.infer(), None))
for lazy_value in lazy_values
],
)])
return value_set
@argument_clinic('func, /')
def _functools_wraps(funcs):
return ValueSet(WrapsCallable(func) for func in funcs)
|
ItemGetterCallable
|
python
|
spyder-ide__spyder
|
external-deps/qtconsole/qtconsole/rich_jupyter_widget.py
|
{
"start": 18210,
"end": 18478
}
|
class ____(RichJupyterWidget):
"""Deprecated class. Use RichJupyterWidget."""
def __init__(self, *a, **kw):
warn("RichIPythonWidget is deprecated, use RichJupyterWidget",
DeprecationWarning)
super().__init__(*a, **kw)
|
RichIPythonWidget
|
python
|
huggingface__transformers
|
src/transformers/models/unispeech_sat/modular_unispeech_sat.py
|
{
"start": 3729,
"end": 6015
}
|
class ____(Wav2Vec2GumbelVectorQuantizer):
def __init__(self, config):
super().__init__(config)
self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars)
@staticmethod
def _compute_perplexity(probs, mask=None):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
# sample code vector probs via gumbel in differentiateable way
codevector_probs = nn.functional.gumbel_softmax(
hidden_states.float(), tau=self.temperature, hard=True
).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
# comptute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
@auto_docstring
|
UniSpeechSatGumbelVectorQuantizer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/optional1.py
|
{
"start": 81,
"end": 1287
}
|
class ____:
def __init__(self):
self.value = 3
def do_stuff(self):
pass
def __enter__(self):
return 3
def __exit__(
self,
t: Optional[type] = None,
exc: Optional[BaseException] = None,
tb: Optional[Any] = None,
) -> bool:
return True
a = None
if 1:
a = Foo()
# If "reportOptionalMemberAccess" is enabled,
# this should generate an error.
a.value = 3
def foo():
pass
b = None
if 1:
b = foo
# If "reportOptionalCall" is enabled,
# this should generate an error.
b()
c = None
if 1:
c = [3, 4, 5]
# If "reportOptionalSubscript" is enabled,
# this should generate an error.
c[2]
# If "reportOptionalIterable" is enabled,
# this should generate an error.
for val in c:
pass
# If "reportOptionalContextManager" is enabled,
# this should generate an error.
cm = None
if 1:
cm = Foo()
with cm as val:
pass
e = None
if 1:
e = 4
# If "reportOptionalOperand" is enabled,
# this should generate an error.
v1 = e + 4
# If "reportOptionalOperand" is enabled,
# this should generate an error.
v2 = e < 5
# If "reportOptionalOperand" is enabled,
# this should generate an error.
v3 = ~e
|
Foo
|
python
|
openai__openai-python
|
src/openai/resources/audio/transcriptions.py
|
{
"start": 49925,
"end": 50200
}
|
class ____:
def __init__(self, transcriptions: AsyncTranscriptions) -> None:
self._transcriptions = transcriptions
self.create = _legacy_response.async_to_raw_response_wrapper(
transcriptions.create,
)
|
AsyncTranscriptionsWithRawResponse
|
python
|
django-extensions__django-extensions
|
django_extensions/collision_resolvers.py
|
{
"start": 8499,
"end": 11085
}
|
class ____:
def __init__(self):
pass
def run_collision_resolver(self, models_to_import):
# type: (Dict[str, List[str]]) -> Dict[str, List[Tuple[str, str]]]
dictionary_of_names = self._get_dictionary_of_names(models_to_import) # type: Dict[str, str]
return self._get_dictionary_of_modules(dictionary_of_names)
@classmethod
def _get_dictionary_of_names(cls, models_to_import): # type: (Dict[str, List[str]]) -> (Dict[str, str])
from django.conf import settings
collision_resolver_class = import_string(
getattr(
settings,
"SHELL_PLUS_MODEL_IMPORTS_RESOLVER",
"django_extensions.collision_resolvers.LegacyCR",
)
)
cls._assert_is_collision_resolver_class_correct(collision_resolver_class)
result = collision_resolver_class().resolve_collisions(models_to_import)
cls._assert_is_collision_resolver_result_correct(result)
return result
@classmethod
def _assert_is_collision_resolver_result_correct(cls, result):
assert isinstance(result, dict), (
"Result of resolve_collisions function must be a dict!"
)
for key, value in result.items():
assert isinstance(key, str), (
"key in collision resolver result should be str not %s" % key
)
assert isinstance(value, str), (
"value in collision resolver result should be str not %s" % value
)
@classmethod
def _assert_is_collision_resolver_class_correct(cls, collision_resolver_class):
assert inspect.isclass(collision_resolver_class) and issubclass(
collision_resolver_class, BaseCR
), "SHELL_PLUS_MODEL_IMPORTS_RESOLVER must be subclass of BaseCR!"
assert (
len(
inspect.getfullargspec(collision_resolver_class.resolve_collisions).args
)
== 2
), "resolve_collisions function must take one argument!"
@classmethod
def _get_dictionary_of_modules(cls, dictionary_of_names):
# type: (Dict[str, str]) -> Dict[str, List[Tuple[str, str]]]
dictionary_of_modules = {} # type: Dict[str, List[Tuple[str, str]]]
for alias, model in dictionary_of_names.items():
module_path, model_name = model.rsplit(".", 1)
dictionary_of_modules.setdefault(module_path, [])
dictionary_of_modules[module_path].append((model_name, alias))
return dictionary_of_modules
|
CollisionResolvingRunner
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
|
{
"start": 67767,
"end": 69432
}
|
class ____(BaseModel):
type: Literal["SimpleRetriever"]
record_selector: RecordSelector = Field(
...,
description="Component that describes how to extract records from a HTTP response.",
)
requester: Union[CustomRequester, HttpRequester] = Field(
...,
description="Requester component that describes how to prepare HTTP requests to send to the source API.",
)
paginator: Optional[Union[DefaultPaginator, NoPagination]] = Field(
None,
description="Paginator component that describes how to navigate through the API's pages.",
)
ignore_stream_slicer_parameters_on_paginated_requests: Optional[bool] = Field(
False,
description="If true, the partition router and incremental request options will be ignored when paginating requests. Request options set directly on the requester will not be ignored.",
)
partition_router: Optional[
Union[
CustomPartitionRouter,
ListPartitionRouter,
SubstreamPartitionRouter,
List[Union[CustomPartitionRouter, ListPartitionRouter, SubstreamPartitionRouter]],
]
] = Field(
[],
description="PartitionRouter component that describes how to partition the stream, enabling incremental syncs and checkpointing.",
title="Partition Router",
)
decoder: Optional[Union[JsonDecoder, JsonlDecoder, IterableDecoder]] = Field(
None,
description="Component decoding the response so records can be extracted.",
title="Decoder",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
|
SimpleRetriever
|
python
|
streamlit__streamlit
|
lib/streamlit/errors.py
|
{
"start": 5832,
"end": 6289
}
|
class ____(LocalizableStreamlitException):
"""Exception raised when an invalid value is specified for `initial_sidebar_state`."""
def __init__(self, initial_sidebar_state: str) -> None:
super().__init__(
'`initial_sidebar_state` must be `"auto"` or `"expanded"` or '
'`"collapsed"` (got `"{initial_sidebar_state}"`)',
initial_sidebar_state=initial_sidebar_state,
)
|
StreamlitInvalidSidebarStateError
|
python
|
ray-project__ray
|
python/ray/serve/llm/__init__.py
|
{
"start": 1653,
"end": 1807
}
|
class ____(_LLMServer):
pass
@Deprecated(
old="ray.serve.llm.LLMRouter",
new="ray.serve.llm.ingress.OpenAIIngress",
error=False,
)
|
LLMServer
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/widgets/base.py
|
{
"start": 12997,
"end": 15812
}
|
class ____:
"""
Clickable button.
:param text: The caption for the button.
:param handler: `None` or callable. Called when the button is clicked. No
parameters are passed to this callable. Use for instance Python's
`functools.partial` to pass parameters to this callable if needed.
:param width: Width of the button.
"""
def __init__(
self,
text: str,
handler: Callable[[], None] | None = None,
width: int = 12,
left_symbol: str = "<",
right_symbol: str = ">",
) -> None:
self.text = text
self.left_symbol = left_symbol
self.right_symbol = right_symbol
self.handler = handler
self.width = width
self.control = FormattedTextControl(
self._get_text_fragments,
key_bindings=self._get_key_bindings(),
focusable=True,
)
def get_style() -> str:
if get_app().layout.has_focus(self):
return "class:button.focused"
else:
return "class:button"
# Note: `dont_extend_width` is False, because we want to allow buttons
# to take more space if the parent container provides more space.
# Otherwise, we will also truncate the text.
# Probably we need a better way here to adjust to width of the
# button to the text.
self.window = Window(
self.control,
align=WindowAlign.CENTER,
height=1,
width=width,
style=get_style,
dont_extend_width=False,
dont_extend_height=True,
)
def _get_text_fragments(self) -> StyleAndTextTuples:
width = (
self.width
- (get_cwidth(self.left_symbol) + get_cwidth(self.right_symbol))
+ (len(self.text) - get_cwidth(self.text))
)
text = (f"{{:^{max(0, width)}}}").format(self.text)
def handler(mouse_event: MouseEvent) -> None:
if (
self.handler is not None
and mouse_event.event_type == MouseEventType.MOUSE_UP
):
self.handler()
return [
("class:button.arrow", self.left_symbol, handler),
("[SetCursorPosition]", ""),
("class:button.text", text, handler),
("class:button.arrow", self.right_symbol, handler),
]
def _get_key_bindings(self) -> KeyBindings:
"Key bindings for the Button."
kb = KeyBindings()
@kb.add(" ")
@kb.add("enter")
def _(event: E) -> None:
if self.handler is not None:
self.handler()
return kb
def __pt_container__(self) -> Container:
return self.window
|
Button
|
python
|
getsentry__sentry
|
src/sentry/api/bases/organization.py
|
{
"start": 6788,
"end": 7098
}
|
class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read", "org:write", "org:admin"],
"POST": ["org:read", "org:write", "org:admin"],
"PUT": ["org:read", "org:write", "org:admin"],
"DELETE": ["org:read", "org:write", "org:admin"],
}
|
OrganizationSearchPermission
|
python
|
tiangolo__fastapi
|
docs_src/websockets/tutorial003_py39.py
|
{
"start": 1335,
"end": 2549
}
|
class ____:
def __init__(self):
self.active_connections: list[WebSocket] = []
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.append(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
async def send_personal_message(self, message: str, websocket: WebSocket):
await websocket.send_text(message)
async def broadcast(self, message: str):
for connection in self.active_connections:
await connection.send_text(message)
manager = ConnectionManager()
@app.get("/")
async def get():
return HTMLResponse(html)
@app.websocket("/ws/{client_id}")
async def websocket_endpoint(websocket: WebSocket, client_id: int):
await manager.connect(websocket)
try:
while True:
data = await websocket.receive_text()
await manager.send_personal_message(f"You wrote: {data}", websocket)
await manager.broadcast(f"Client #{client_id} says: {data}")
except WebSocketDisconnect:
manager.disconnect(websocket)
await manager.broadcast(f"Client #{client_id} left the chat")
|
ConnectionManager
|
python
|
streamlit__streamlit
|
lib/streamlit/components/v2/manifest_scanner.py
|
{
"start": 2264,
"end": 20976
}
|
class ____:
"""Structured configuration for a single component entry.
Parameters
----------
name
Component name as declared in ``pyproject.toml``.
asset_dir
Optional relative directory containing component assets.
"""
name: str
asset_dir: str | None = None
@staticmethod
def from_dict(config: dict[str, Any]) -> ComponentConfig:
"""Create a ComponentConfig from a raw dict.
Parameters
----------
config
Raw component dictionary parsed from TOML.
Returns
-------
ComponentConfig
Parsed and validated component configuration.
"""
name_value = config.get("name")
if not isinstance(name_value, str) or not name_value:
# Fail closed: invalid component entry
raise ValueError("Component entry missing required 'name' field")
asset_dir_value = config.get("asset_dir")
if asset_dir_value is not None and not isinstance(asset_dir_value, str):
# Fail closed: invalid asset_dir value
raise ValueError("'asset_dir' must be a string")
return ComponentConfig(
name=name_value,
asset_dir=asset_dir_value,
)
@staticmethod
def parse_or_none(config: dict[str, Any]) -> ComponentConfig | None:
"""Best-effort parse without raising; returns None on malformed input."""
try:
return ComponentConfig.from_dict(config)
except Exception as e:
_LOGGER.debug("Skipping malformed component entry: %s", e)
return None
def resolve_asset_root(self, package_root: Path) -> Path | None:
"""Resolve and security-check the component's asset root directory.
Parameters
----------
package_root : Path
The root directory of the installed component package.
Returns
-------
Path | None
Absolute, resolved path to the asset directory, or ``None`` if
``asset_dir`` is not declared.
Raises
------
StreamlitComponentRegistryError
If the declared directory does not exist, is not a directory, or
resolves outside of ``package_root``.
"""
if self.asset_dir is None:
return None
# Validate the configured path string first
ComponentPathUtils.validate_path_security(self.asset_dir)
asset_root = (package_root / self.asset_dir).resolve()
if not asset_root.exists() or not asset_root.is_dir():
raise StreamlitComponentRegistryError(
f"Declared asset_dir '{self.asset_dir}' for component '{self.name}' "
f"does not exist or is not a directory under package root '{package_root}'."
)
# Ensure the resolved directory is within the package root after following symlinks
ComponentPathUtils.ensure_within_root(
abs_path=asset_root,
root=package_root.resolve(),
kind="asset_dir",
)
return asset_root
def _is_likely_streamlit_component_package(
dist: importlib.metadata.Distribution,
) -> bool:
"""Check if a package is likely to contain streamlit components before
expensive operations.
This early filter reduces the number of packages that need file I/O
operations from potentially hundreds down to just a few candidates.
Parameters
----------
dist : importlib.metadata.Distribution
The package distribution to check.
Returns
-------
bool
True if the package might contain streamlit components, False otherwise.
"""
# Get package metadata
name = dist.name.lower()
summary = dist.metadata["Summary"].lower() if "Summary" in dist.metadata else ""
# Filter 1: Package name suggests streamlit component
if "streamlit" in name:
return True
# Filter 2: Package description mentions streamlit
if "streamlit" in summary:
return True
# Filter 3: Check if package depends on streamlit
try:
# Check requires_dist for streamlit dependency
requires_dist = dist.metadata.get_all("Requires-Dist") or []
for requirement in requires_dist:
if requirement and "streamlit" in requirement.lower():
return True
except Exception as e:
# Don't fail on metadata parsing issues, but log for debugging purposes
_LOGGER.debug(
"Failed to parse package metadata for streamlit component detection: %s", e
)
# Filter 4: Check if this is a known streamlit ecosystem package
# Common patterns in streamlit component package names. Use anchored checks to
# avoid matching unrelated packages like "test-utils".
return name.startswith(("streamlit-", "streamlit_", "st-", "st_"))
def _find_package_pyproject_toml(dist: importlib.metadata.Distribution) -> Path | None:
"""Find ``pyproject.toml`` for a package.
Handles both regular and editable installs. The function uses increasingly
permissive strategies to locate the file while validating that the file
belongs to the given distribution.
Parameters
----------
dist : importlib.metadata.Distribution
The package distribution to find pyproject.toml for.
Returns
-------
Path | None
Path to the ``pyproject.toml`` file if found, otherwise ``None``.
"""
package_name = _normalize_package_name(dist.name)
# Try increasingly permissive strategies
for finder in (
_pyproject_via_read_text,
_pyproject_via_dist_files,
lambda d: _pyproject_via_import_spec(d, package_name),
):
result = finder(dist)
if result is not None:
return result
return None
def _pyproject_via_read_text(dist: importlib.metadata.Distribution) -> Path | None:
"""Locate pyproject.toml using the distribution's read_text + nearby files.
This works for many types of installations including some editable ones.
"""
package_name = _normalize_package_name(dist.name)
try:
if hasattr(dist, "read_text"):
pyproject_content = dist.read_text("pyproject.toml")
if pyproject_content and dist.files:
# Found content, now find the actual file path
# Look for a reasonable file to get the directory
for file in dist.files:
if "__init__.py" in str(file) or ".py" in str(file):
try:
file_path = Path(str(dist.locate_file(file)))
# Check nearby directories for pyproject.toml
current_dir = file_path.parent
# Check current directory and parent
for search_dir in [current_dir, current_dir.parent]:
pyproject_path = search_dir / "pyproject.toml"
if (
pyproject_path.exists()
and _validate_pyproject_for_package(
pyproject_path,
dist.name,
package_name,
)
):
return pyproject_path
# Stop after first reasonable file
break
except Exception: # noqa: S112
continue
except Exception:
return None
return None
def _pyproject_via_dist_files(dist: importlib.metadata.Distribution) -> Path | None:
"""Locate pyproject.toml by scanning the distribution's file list."""
package_name = _normalize_package_name(dist.name)
files = getattr(dist, "files", None)
if not files:
return None
for file in files:
if getattr(file, "name", None) == "pyproject.toml" or str(file).endswith(
"pyproject.toml"
):
try:
pyproject_path = Path(str(dist.locate_file(file)))
if _validate_pyproject_for_package(
pyproject_path,
dist.name,
package_name,
):
return pyproject_path
except Exception: # noqa: S112
continue
return None
def _pyproject_via_import_spec(
dist: importlib.metadata.Distribution, package_name: str
) -> Path | None:
"""Locate pyproject.toml by resolving the import spec and checking nearby.
For editable installs, try the package directory and its parent only.
"""
try:
spec = importlib.util.find_spec(package_name)
if spec and spec.origin:
package_dir = Path(spec.origin).parent
for search_dir in [package_dir, package_dir.parent]:
pyproject_path = search_dir / "pyproject.toml"
if pyproject_path.exists() and _validate_pyproject_for_package(
pyproject_path,
dist.name,
package_name,
):
return pyproject_path
except Exception:
return None
return None
def _validate_pyproject_for_package(
pyproject_path: Path, dist_name: str, package_name: str
) -> bool:
"""Validate that a ``pyproject.toml`` file belongs to the specified package.
Parameters
----------
pyproject_path : Path
Path to the pyproject.toml file to validate.
dist_name : str
The distribution name (e.g., "streamlit-bokeh").
package_name : str
The package name (e.g., "streamlit_bokeh").
Returns
-------
bool
True if the file belongs to this package, False otherwise.
"""
try:
with open(pyproject_path, encoding="utf-8") as f:
pyproject_data = toml.load(f)
# Check if this pyproject.toml is for the package we're looking for
project_name = None
# Try to get the project name from [project] table
if "project" in pyproject_data and "name" in pyproject_data["project"]:
project_name = pyproject_data["project"]["name"]
# Also try to get it from [tool.setuptools] or other build system configs
if (
not project_name
and "tool" in pyproject_data
and (
"setuptools" in pyproject_data["tool"]
and "package-name" in pyproject_data["tool"]["setuptools"]
)
):
project_name = pyproject_data["tool"]["setuptools"]["package-name"]
# If we found a project name, check if it matches either the dist name or package name
if project_name:
# Normalize names for comparison using PEP 503 canonicalization
# This handles hyphens, underscores, and dots consistently.
canonical_project = packaging_utils.canonicalize_name(project_name)
canonical_dist = packaging_utils.canonicalize_name(dist_name)
canonical_package = packaging_utils.canonicalize_name(package_name)
# Check if project name matches either the distribution name or the package name
return canonical_project in (canonical_dist, canonical_package)
# If we can't determine ownership, be conservative and reject it
return False
except Exception as e:
_LOGGER.debug(
"Error validating pyproject.toml at %s for %s: %s",
pyproject_path,
dist_name,
e,
)
return False
def _load_pyproject(pyproject_path: Path) -> dict[str, Any] | None:
"""Load and parse a pyproject.toml, returning parsed data or None on failure."""
try:
with open(pyproject_path, encoding="utf-8") as f:
return toml.load(f)
except Exception as e:
_LOGGER.debug("Failed to parse pyproject.toml at %s: %s", pyproject_path, e)
return None
def _extract_components(pyproject_data: dict[str, Any]) -> list[dict[str, Any]] | None:
"""Extract raw component dicts from pyproject data; return None if absent."""
streamlit_component = (
pyproject_data.get("tool", {}).get("streamlit", {}).get("component")
)
if not streamlit_component:
return None
raw_components = streamlit_component.get("components")
if not isinstance(raw_components, list):
return None
# Ensure a list of dicts for type safety
result: list[dict[str, Any]] = [
item for item in raw_components if isinstance(item, dict)
]
if not result:
return None
return result
def _resolve_package_root(
dist: importlib.metadata.Distribution, package_name: str, pyproject_path: Path
) -> Path:
"""Resolve the package root directory with fallbacks."""
package_root: Path | None = None
try:
spec = importlib.util.find_spec(package_name)
if spec and spec.origin:
package_root = Path(spec.origin).parent
except Exception as e:
_LOGGER.debug(
"Failed to resolve package root via import spec for %s: %s",
package_name,
e,
)
files = getattr(dist, "files", None)
if not package_root and files:
for file in files:
if package_name in str(file) and "__init__.py" in str(file):
try:
init_path = Path(str(dist.locate_file(file)))
package_root = init_path.parent
break
except Exception as e:
_LOGGER.debug(
"Failed to resolve package root via dist files for %s: %s",
package_name,
e,
)
if not package_root:
package_root = pyproject_path.parent
return package_root
def _derive_project_metadata(
pyproject_data: dict[str, Any], dist: importlib.metadata.Distribution
) -> tuple[str, str]:
"""Derive project name and version with safe fallbacks."""
project_table = pyproject_data.get("project", {})
derived_name = project_table.get("name") or dist.name
derived_version = project_table.get("version") or dist.version or "0.0.0"
return derived_name, derived_version
def _process_single_package(
dist: importlib.metadata.Distribution,
) -> tuple[ComponentManifest, Path] | None:
"""Process a single package to extract component manifest.
This function is designed to be called from a thread pool for parallel processing.
Parameters
----------
dist : importlib.metadata.Distribution
The package distribution to process.
Returns
-------
tuple[ComponentManifest, Path] | None
The manifest and package root if found, otherwise ``None``.
"""
try:
pyproject_path = _find_package_pyproject_toml(dist)
if not pyproject_path:
return None
pyproject_data = _load_pyproject(pyproject_path)
if pyproject_data is None:
return None
raw_components = _extract_components(pyproject_data)
if not raw_components:
return None
package_name = _normalize_package_name(dist.name)
package_root = _resolve_package_root(dist, package_name, pyproject_path)
derived_name, derived_version = _derive_project_metadata(pyproject_data, dist)
parsed_components: list[ComponentConfig] = [
parsed
for comp in raw_components
if (parsed := ComponentConfig.parse_or_none(comp)) is not None
]
if not parsed_components:
return None
manifest = ComponentManifest(
name=derived_name,
version=derived_version,
components=parsed_components,
)
return (manifest, package_root)
except Exception as e:
_LOGGER.debug(
"Unexpected error processing distribution %s: %s",
getattr(dist, "name", "<unknown>"),
e,
)
return None
def scan_component_manifests(
max_workers: int | None = None,
) -> list[tuple[ComponentManifest, Path]]:
"""Scan installed packages for Streamlit component metadata.
Uses parallel processing to improve performance in environments with many
installed packages. Applies early filtering to only check packages likely to
contain streamlit components.
Parameters
----------
max_workers : int or None
Maximum number of worker threads. If None, uses min(32, (os.cpu_count()
or 1) + 4).
Returns
-------
list[tuple[ComponentManifest, Path]]
List of tuples of manifests and their package root paths.
"""
manifests: list[tuple[ComponentManifest, Path]] = []
# Get all distributions first (this is fast)
all_distributions = list(importlib.metadata.distributions())
if not all_distributions:
return manifests
# Apply early filtering to reduce expensive file operations
candidate_distributions = [
dist
for dist in all_distributions
if _is_likely_streamlit_component_package(dist)
]
_LOGGER.debug(
"Filtered %d packages down to %d candidates for component scanning",
len(all_distributions),
len(candidate_distributions),
)
if not candidate_distributions:
return manifests
# Default max_workers follows ThreadPoolExecutor's default logic
if max_workers is None:
max_workers = min(32, (os.cpu_count() or 1) + 4)
# Clamp max_workers to reasonable bounds for this task
max_workers = min(
max_workers, len(candidate_distributions), 16
) # Don't use more threads than packages or 16
_LOGGER.debug(
"Scanning %d candidate packages for component manifests using %d worker threads",
len(candidate_distributions),
max_workers,
)
# Process packages in parallel
with ThreadPoolExecutor(max_workers=max_workers) as executor:
# Submit all tasks
future_to_dist = {
executor.submit(_process_single_package, dist): dist.name
for dist in candidate_distributions
}
# Collect results as they complete
for future in as_completed(future_to_dist):
result = future.result()
if result:
manifests.append(result)
_LOGGER.debug("Found %d component manifests total", len(manifests))
return manifests
|
ComponentConfig
|
python
|
neetcode-gh__leetcode
|
python/0200-number-of-islands.py
|
{
"start": 1432,
"end": 2462
}
|
class ____:
def numIslands(self, grid: List[List[str]]) -> int:
if not grid:
return 0
rows, cols = len(grid), len(grid[0])
visited = set()
islands = 0
def bfs(r, c):
q = deque()
visited.add((r, c))
q.append((r, c))
while q:
row, col = q.popleft()
directions = [[1, 0],[-1, 0],[0, 1],[0, -1]]
for dr, dc in directions:
r, c = row + dr, col + dc
if (r) in range(rows) and (c) in range(cols) and grid[r][c] == '1' and (r, c) not in visited:
q.append((r, c ))
visited.add((r, c ))
for r in range(rows):
for c in range(cols):
if grid[r][c] == "1" and (r, c) not in visited:
bfs(r, c)
islands += 1
return islands
|
SolutionBFS
|
python
|
sqlalchemy__sqlalchemy
|
test/base/test_result.py
|
{
"start": 636,
"end": 9174
}
|
class ____(fixtures.TestBase):
def _fixture(self, values, labels):
return result.result_tuple(labels)(values)
def test_empty(self):
keyed_tuple = self._fixture([], [])
eq_(str(keyed_tuple), "()")
eq_(len(keyed_tuple), 0)
eq_(list(keyed_tuple._mapping.keys()), [])
eq_(keyed_tuple._fields, ())
eq_(keyed_tuple._asdict(), {})
def test_values_none_labels(self):
keyed_tuple = self._fixture([1, 2], [None, None])
eq_(str(keyed_tuple), "(1, 2)")
eq_(len(keyed_tuple), 2)
eq_(list(keyed_tuple._mapping.keys()), [])
eq_(keyed_tuple._fields, ())
eq_(keyed_tuple._asdict(), {})
eq_(keyed_tuple[0], 1)
eq_(keyed_tuple[1], 2)
def test_creation(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
eq_(str(keyed_tuple), "(1, 2)")
eq_(list(keyed_tuple._mapping.keys()), ["a", "b"])
eq_(keyed_tuple._fields, ("a", "b"))
eq_(keyed_tuple._asdict(), {"a": 1, "b": 2})
def test_index_access(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
eq_(keyed_tuple[0], 1)
eq_(keyed_tuple[1], 2)
def should_raise():
keyed_tuple[2]
assert_raises(IndexError, should_raise)
def test_negative_index_access(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
eq_(keyed_tuple[-1], 2)
eq_(keyed_tuple[-2:-1], (1,))
def test_slice_access(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
eq_(keyed_tuple[0:2], (1, 2))
def test_slices_arent_in_mappings(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
if compat.py312:
with expect_raises(KeyError):
keyed_tuple._mapping[0:2]
else:
with expect_raises(TypeError):
keyed_tuple._mapping[0:2]
def test_integers_arent_in_mappings(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
assert_raises(KeyError, lambda: keyed_tuple._mapping[1])
def test_getter(self):
keyed_tuple = self._fixture([1, 2, 3], ["a", "b", "c"])
getter = keyed_tuple._parent._getter("b")
eq_(getter(keyed_tuple), 2)
getter = keyed_tuple._parent._getter(2)
eq_(getter(keyed_tuple), 3)
def test_tuple_getter(self):
keyed_tuple = self._fixture([1, 2, 3], ["a", "b", "c"])
getter = keyed_tuple._parent._row_as_tuple_getter(["b", "c"])
eq_(getter(keyed_tuple), (2, 3))
# row as tuple getter doesn't accept ints. for ints, just
# use plain python
getter = operator.itemgetter(2, 0, 1)
# getter = keyed_tuple._parent._row_as_tuple_getter([2, 0, 1])
eq_(getter(keyed_tuple), (3, 1, 2))
def test_attribute_access(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
eq_(keyed_tuple.a, 1)
eq_(keyed_tuple.b, 2)
def should_raise():
keyed_tuple.c
assert_raises(AttributeError, should_raise)
def test_contains(self):
keyed_tuple = self._fixture(["x", "y"], ["a", "b"])
is_true("x" in keyed_tuple)
is_false("z" in keyed_tuple)
is_true("z" not in keyed_tuple)
is_false("x" not in keyed_tuple)
# we don't do keys
is_false("a" in keyed_tuple)
is_false("z" in keyed_tuple)
is_true("a" not in keyed_tuple)
is_true("z" not in keyed_tuple)
def test_contains_mapping(self):
keyed_tuple = self._fixture(["x", "y"], ["a", "b"])._mapping
is_false("x" in keyed_tuple)
is_false("z" in keyed_tuple)
is_true("z" not in keyed_tuple)
is_true("x" not in keyed_tuple)
# we do keys
is_true("a" in keyed_tuple)
is_true("b" in keyed_tuple)
def test_none_label(self):
keyed_tuple = self._fixture([1, 2, 3], ["a", None, "b"])
eq_(str(keyed_tuple), "(1, 2, 3)")
eq_(list(keyed_tuple._mapping.keys()), ["a", "b"])
eq_(keyed_tuple._fields, ("a", "b"))
eq_(keyed_tuple._asdict(), {"a": 1, "b": 3})
# attribute access: can't get at value 2
eq_(keyed_tuple.a, 1)
eq_(keyed_tuple.b, 3)
# index access: can get at value 2
eq_(keyed_tuple[0], 1)
eq_(keyed_tuple[1], 2)
eq_(keyed_tuple[2], 3)
def test_duplicate_labels(self):
keyed_tuple = self._fixture([1, 2, 3], ["a", "b", "b"])
eq_(str(keyed_tuple), "(1, 2, 3)")
eq_(list(keyed_tuple._mapping.keys()), ["a", "b", "b"])
eq_(keyed_tuple._fields, ("a", "b", "b"))
eq_(keyed_tuple._asdict(), {"a": 1, "b": 3})
# attribute access: can't get at value 2
eq_(keyed_tuple.a, 1)
eq_(keyed_tuple.b, 3)
# index access: can get at value 2
eq_(keyed_tuple[0], 1)
eq_(keyed_tuple[1], 2)
eq_(keyed_tuple[2], 3)
def test_immutable(self):
keyed_tuple = self._fixture([1, 2], ["a", "b"])
eq_(str(keyed_tuple), "(1, 2)")
eq_(keyed_tuple.a, 1)
# eh
# assert_raises(AttributeError, setattr, keyed_tuple, "a", 5)
def should_raise():
keyed_tuple[0] = 100
assert_raises(TypeError, should_raise)
def test_serialize(self):
keyed_tuple = self._fixture([1, 2, 3], ["a", None, "b"])
for loads, dumps in picklers():
kt = loads(dumps(keyed_tuple))
eq_(str(kt), "(1, 2, 3)")
eq_(list(kt._mapping.keys()), ["a", "b"])
eq_(kt._fields, ("a", "b"))
eq_(kt._asdict(), {"a": 1, "b": 3})
@testing.fixture
def _load_module(self):
from sqlalchemy.engine import _row_cy as _cy_row
_py_row = load_uncompiled_module(_cy_row)
# allow pickle to serialize the two rowproxy_reconstructor functions
# create a new virtual module
new_name = _py_row.__name__ + "py_only"
sys.modules[new_name] = _py_row
_py_row.__name__ = new_name
for item in vars(_py_row).values():
# only the rowproxy_reconstructor module is required to change,
# but set every one for consistency
if getattr(item, "__module__", None) == _cy_row.__name__:
item.__module__ = new_name
yield _cy_row, _py_row
sys.modules.pop(new_name)
@testing.requires.cextensions
@testing.variation("direction", ["py_to_cy", "cy_to_py"])
def test_serialize_cy_py_cy(
self, direction: testing.Variation, _load_module
):
_cy_row, _py_row = _load_module
global Row
p = result.SimpleResultMetaData(["a", "w", "b"])
if direction.py_to_cy:
dump_cls = _py_row.BaseRow
load_cls = _cy_row.BaseRow
elif direction.cy_to_py:
dump_cls = _cy_row.BaseRow
load_cls = _py_row.BaseRow
else:
direction.fail()
for loads, dumps in picklers():
class Row(dump_cls):
pass
row = Row(p, p._processors, p._key_to_index, (1, 2, 3))
state = dumps(row)
class Row(load_cls):
pass
row2 = loads(state)
is_true(isinstance(row2, load_cls))
is_false(isinstance(row2, dump_cls))
state2 = dumps(row2)
class Row(dump_cls):
pass
row3 = loads(state2)
is_true(isinstance(row3, dump_cls))
def test_processors(self):
parent = result.SimpleResultMetaData(["a", "b", "c", "d"])
data = (1, 99, "42", "foo")
row_none = result.Row(parent, None, parent._key_to_index, data)
eq_(row_none._to_tuple_instance(), data)
row_all_p = result.Row(
parent, [str, float, int, str.upper], parent._key_to_index, data
)
eq_(row_all_p._to_tuple_instance(), ("1", 99.0, 42, "FOO"))
row_some_p = result.Row(
parent, [None, str, None, str.upper], parent._key_to_index, data
)
eq_(row_some_p._to_tuple_instance(), (1, "99", "42", "FOO"))
with expect_raises(AssertionError):
result.Row(parent, [None, str], parent._key_to_index, data)
def test_tuplegetter(self):
data = list(range(10, 20))
eq_(result.tuplegetter(1)(data), [11])
eq_(result.tuplegetter(1, 9, 3)(data), (11, 19, 13))
eq_(result.tuplegetter(2, 3, 4)(data), [12, 13, 14])
|
ResultTupleTest
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py
|
{
"start": 48969,
"end": 52963
}
|
class ____(GoogleCloudBaseOperator):
"""
Gets an entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogGetEntryOperator`
:param location: Required. The location of the entry to get.
:param entry_group: Required. The entry group of the entry to get.
:param entry: The ID of the entry to get.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
location: str,
entry_group: str,
entry: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry = entry
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_entry(
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogEntryLink.persist(
context=context,
entry_id=self.entry,
entry_group_id=self.entry_group,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return Entry.to_dict(result)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogGetEntryGroupOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
|
CloudDataCatalogGetEntryOperator
|
python
|
getsentry__sentry
|
tests/sentry/seer/assisted_query/test_issues_tools.py
|
{
"start": 16815,
"end": 22395
}
|
class ____(APITestCase, SnubaTestCase):
databases = {"default", "control"}
def setUp(self):
super().setUp()
self.min_ago = before_now(minutes=1)
def test_execute_issues_query_basic(self):
"""Test basic issues query"""
# Create some issues
self.store_event(
data={
"event_id": "a" * 32,
"message": "Error message 1",
"level": "error",
"fingerprint": ["group-1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"message": "Error message 2",
"level": "warning",
"fingerprint": ["group-2"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = execute_issues_query(
org_id=self.organization.id,
project_ids=[self.project.id],
query="is:unresolved",
stats_period="24h",
)
assert result is not None
assert isinstance(result, list)
assert len(result) >= 2
# Check structure of returned issues
for issue in result:
assert "id" in issue
assert "shortId" in issue
assert "title" in issue
assert "status" in issue
assert "project" in issue
def test_execute_issues_query_with_filter(self):
"""Test issues query with specific filter"""
# Create an error event
self.store_event(
data={
"event_id": "a" * 32,
"message": "Test error",
"level": "error",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = execute_issues_query(
org_id=self.organization.id,
project_ids=[self.project.id],
query="level:error",
stats_period="24h",
)
assert result is not None
assert isinstance(result, list)
# Should have at least our error event
assert len(result) >= 1
def test_execute_issues_query_with_sort(self):
"""Test issues query with sorting"""
# Create multiple issues
for i in range(3):
self.store_event(
data={
"event_id": chr(97 + i) * 32,
"message": f"Error {i}",
"fingerprint": [f"group-{i}"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = execute_issues_query(
org_id=self.organization.id,
project_ids=[self.project.id],
query="is:unresolved",
stats_period="24h",
sort="date",
)
assert result is not None
assert isinstance(result, list)
assert len(result) >= 3
def test_execute_issues_query_with_limit(self):
"""Test issues query with limit"""
# Create multiple issues
for i in range(5):
self.store_event(
data={
"event_id": chr(97 + i) * 32,
"message": f"Error {i}",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = execute_issues_query(
org_id=self.organization.id,
project_ids=[self.project.id],
query="is:unresolved",
stats_period="24h",
limit=2,
)
assert result is not None
assert isinstance(result, list)
# Should respect limit
assert len(result) <= 2
def test_execute_issues_query_nonexistent_organization(self):
"""Test that nonexistent organization returns None"""
result = execute_issues_query(
org_id=99999,
project_ids=[self.project.id],
query="is:unresolved",
stats_period="24h",
)
assert result is None
def test_execute_issues_query_multiple_projects(self):
"""Test issues query across multiple projects"""
project2 = self.create_project(organization=self.organization)
# Create issues in both projects
self.store_event(
data={
"event_id": "a" * 32,
"message": "Project 1 error",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"message": "Project 2 error",
"timestamp": self.min_ago.isoformat(),
},
project_id=project2.id,
)
result = execute_issues_query(
org_id=self.organization.id,
project_ids=[self.project.id, project2.id],
query="is:unresolved",
stats_period="24h",
)
assert result is not None
assert isinstance(result, list)
# Should have issues from both projects
assert len(result) >= 2
project_ids = {issue["project"]["id"] for issue in result}
assert str(self.project.id) in project_ids
assert str(project2.id) in project_ids
@pytest.mark.django_db(databases=["default", "control"])
|
TestExecuteIssuesQuery
|
python
|
pydata__xarray
|
xarray/tests/test_dataset.py
|
{
"start": 6663,
"end": 7445
}
|
class ____(backends.InMemoryDataStore):
"""
Store that does not allow any data access.
"""
def __init__(self):
super().__init__()
self._indexvars = set()
def store(self, variables, *args, **kwargs) -> None:
super().store(variables, *args, **kwargs)
for k, v in variables.items():
if isinstance(v, IndexVariable):
self._indexvars.add(k)
def get_variables(self):
def lazy_inaccessible(k, v):
if k in self._indexvars:
return v
data = indexing.LazilyIndexedArray(InaccessibleArray(v.values))
return Variable(v.dims, data, v.attrs)
return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()}
|
InaccessibleVariableDataStore
|
python
|
dask__dask
|
dask/tests/test_layers.py
|
{
"start": 329,
"end": 6472
}
|
class ____(SchedulerPlugin):
"""Plugin to help record which modules are imported on the scheduler"""
name = "import-check"
def __init__(self, pattern):
self.pattern = pattern
async def start(self, scheduler):
# Record the modules that have been imported when the scheduler starts
self.start_modules = set()
for mod in set(sys.modules):
if not mod.startswith(self.pattern):
self.start_modules.add(mod)
else:
# Manually remove the target library
sys.modules.pop(mod)
def test_array_chunk_shape_dep():
pytest.importorskip("numpy")
dac = pytest.importorskip("dask.array.core")
d = 2 # number of chunks in x,y
chunk = (2, 3) # chunk shape
shape = tuple(d * n for n in chunk) # array shape
chunks = dac.normalize_chunks(chunk, shape)
array_deps = ArrayChunkShapeDep(chunks)
def check(i, j):
chunk_shape = array_deps[(i, j)]
assert chunk_shape == chunk
for i in range(d):
for j in range(d):
check(i, j)
def test_array_slice_deps():
pytest.importorskip("numpy")
dac = pytest.importorskip("dask.array.core")
d = 2 # number of chunks in x,y
chunk = (2, 3) # chunk shape
shape = tuple(d * n for n in chunk) # array shape
chunks = dac.normalize_chunks(chunk, shape)
array_deps = ArraySliceDep(chunks)
def check(i, j):
slices = array_deps[(i, j)]
assert slices == (
slice(chunk[0] * i, chunk[0] * (i + 1), None),
slice(chunk[1] * j, chunk[1] * (j + 1), None),
)
for i in range(d):
for j in range(d):
check(i, j)
def _dataframe_shuffle(tmpdir):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
# Perform a computation using an HLG-based shuffle
df = pd.DataFrame({"a": range(10), "b": range(10, 20)})
return dd.from_pandas(df, npartitions=2).shuffle("a", shuffle_method="tasks")
def _dataframe_tree_reduction(tmpdir):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
# Perform a computation using an HLG-based tree reduction
df = pd.DataFrame({"a": range(10), "b": range(10, 20)})
return dd.from_pandas(df, npartitions=2).mean()
def _dataframe_broadcast_join(tmpdir):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
# Perform a computation using an HLG-based broadcast join
df = pd.DataFrame({"a": range(10), "b": range(10, 20)})
ddf1 = dd.from_pandas(df, npartitions=4)
ddf2 = dd.from_pandas(df, npartitions=1)
return ddf1.merge(ddf2, how="left", broadcast=True, shuffle_method="tasks")
def _array_creation(tmpdir):
da = pytest.importorskip("dask.array")
# Perform a computation using HLG-based array creation
return da.ones((100,)) + da.zeros((100,))
def _array_map_overlap(tmpdir):
da = pytest.importorskip("dask.array")
array = da.ones((100,))
return array.map_overlap(lambda x: x, depth=1, boundary="none")
def test_fractional_slice():
assert fractional_slice(("x", 4.9), {0: 2}) == (getitem, ("x", 5), (slice(0, 2),))
assert fractional_slice(("x", 3, 5.1), {0: 2, 1: 3}) == (
getitem,
("x", 3, 5),
(slice(None, None, None), slice(-3, None)),
)
assert fractional_slice(("x", 2.9, 5.1), {0: 2, 1: 3}) == (
getitem,
("x", 3, 5),
(slice(0, 2), slice(-3, None)),
)
fs = fractional_slice(("x", 4.9), {0: 2})
assert isinstance(fs[1][1], int)
def _pq_pyarrow(tmpdir):
pytest.importorskip("pyarrow")
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2).to_parquet(
str(tmpdir)
)
filters = [("a", "<=", 2)]
ddf1 = dd.read_parquet(str(tmpdir), filters=filters)
return ddf1
def _read_csv(tmpdir):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
dd.from_pandas(
pd.DataFrame({"a": range(10)}),
npartitions=2,
).to_csv(str(tmpdir))
return dd.read_csv(os.path.join(str(tmpdir), "*"))
@pytest.mark.xfail(reason="#8480")
@pytest.mark.parametrize(
"op,lib",
[
(_dataframe_shuffle, "pandas."),
(_dataframe_tree_reduction, "pandas."),
(_dataframe_broadcast_join, "pandas."),
(_pq_pyarrow, "pandas."),
(_read_csv, "pandas."),
(_array_creation, "numpy."),
(_array_map_overlap, "numpy."),
],
)
@pytest.mark.parametrize("optimize_graph", [True, False])
def test_scheduler_highlevel_graph_unpack_import(op, lib, optimize_graph, loop, tmpdir):
# Test that array/dataframe-specific modules are not imported
# on the scheduler when an HLG layers are unpacked/materialized.
with cluster(scheduler_kwargs={"plugins": [SchedulerImportCheck(lib)]}) as (
scheduler,
workers,
):
with Client(scheduler["address"], loop=loop) as c:
# Perform a computation using a HighLevelGraph Layer
c.compute(op(tmpdir), optimize_graph=optimize_graph)
# Get the new modules which were imported on the scheduler during the computation
end_modules = c.run_on_scheduler(lambda: set(sys.modules))
start_modules = c.run_on_scheduler(
lambda dask_scheduler: dask_scheduler.plugins[
SchedulerImportCheck.name
].start_modules
)
new_modules = end_modules - start_modules
# Check that the scheduler didn't start with `lib`
# (otherwise we aren't testing anything)
assert not any(module.startswith(lib) for module in start_modules)
# Check whether we imported `lib` on the scheduler
assert not any(module.startswith(lib) for module in new_modules)
def _shuffle_op(ddf):
return ddf.shuffle("x", shuffle_method="tasks")
def _groupby_op(ddf):
return ddf.groupby("name").agg({"x": "mean"})
|
SchedulerImportCheck
|
python
|
dask__dask
|
dask/dataframe/dask_expr/datasets.py
|
{
"start": 457,
"end": 3572
}
|
class ____(PartitionsFiltered, BlockwiseIO):
_absorb_projections = True
_parameters = [
"start",
"end",
"dtypes",
"freq",
"partition_freq",
"seed",
"kwargs",
"columns",
"_partitions",
"_series",
]
_defaults = {
"start": "2000-01-01",
"end": "2000-01-31",
"dtypes": {"name": "string", "id": int, "x": float, "y": float},
"freq": "1s",
"partition_freq": "1d",
"seed": None,
"kwargs": {},
"_partitions": None,
"_series": False,
}
@functools.cached_property
def _meta(self):
result = self._make_timeseries_part("2000", "2000", 0).iloc[:0]
if self._series:
return result[result.columns[0]]
return result
def _divisions(self):
return pd.date_range(start=self.start, end=self.end, freq=self.partition_freq)
@property
def _dtypes(self):
dtypes = self.operand("dtypes")
return {col: dtypes[col] for col in self.operand("columns")}
@functools.cached_property
def random_state(self):
npartitions = len(self._divisions()) - 1
ndtypes = max(len(self.operand("dtypes")), 1)
random_state = np.random.RandomState(self.seed)
n = npartitions * ndtypes
random_data = random_state.bytes(n * 4) # `n` 32-bit integers
l = list(np.frombuffer(random_data, dtype=np.uint32).reshape((n,)))
assert len(l) == n
return l
@functools.cached_property
def _make_timeseries_part(self):
return MakeTimeseriesPart(
self.operand("dtypes"),
list(self._dtypes.keys()),
self.freq,
self.kwargs,
)
def _filtered_task(self, name: Key, index: int) -> Task:
full_divisions = self._divisions()
ndtypes = max(len(self.operand("dtypes")), 1)
task = Task(
name,
self._make_timeseries_part,
full_divisions[index],
full_divisions[index + 1],
self.random_state[index * ndtypes],
)
if self._series:
return Task(name, operator.getitem, task, self.operand("columns")[0])
return task
names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def make_string(n, rstate):
return rstate.choice(names, size=n)
def make_categorical(n, rstate):
return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n), names)
def make_float(n, rstate):
return rstate.rand(n) * 2 - 1
def make_int(n, rstate, lam=1000):
return rstate.poisson(lam, size=n)
make = {
float: make_float,
int: make_int,
str: make_string,
object: make_string,
"string": make_string,
"category": make_categorical,
}
|
Timeseries
|
python
|
django-import-export__django-import-export
|
tests/core/tests/admin_integration/test_import_functionality.py
|
{
"start": 20940,
"end": 22469
}
|
class ____(AdminTestMixin, TestCase):
"""Test preview order displayed correctly (issue 1784)."""
fixtures = ["author"]
def test_import_preview_order(self):
author_id = Author.objects.first().id
response = self._do_import_post(
self.ebook_import_url,
"ebooks.csv",
input_format="0",
data={"author": author_id},
)
# test header rendered in correct order
target_header_re = (
r"<thead>[\\n\s]+"
r"<tr>[\\n\s]+"
r"<th></th>[\\n\s]+"
r"<th>id</th>[\\n\s]+"
r"<th>Email of the author</th>[\\n\s]+"
r"<th>name</th>[\\n\s]+"
r"<th>published_date</th>[\\n\s]+"
r"<th>Author Name</th>[\\n\s]+"
r"</tr>[\\n\s]+"
"</thead>"
)
self.assertRegex(response.content.decode(), target_header_re)
# test row rendered in correct order
target_row_re = (
r'<tr class="new">[\\n\s]+'
r'<td class="import-type">[\\n\s]+New[\\n\s]+</td>[\\n\s]+'
r'<td><ins style="background:#e6ffe6;">1</ins></td>[\\n\s]+'
r'<td><ins style="background:#e6ffe6;">test@example.com</ins></td>[\\n\s]+'
r'<td><ins style="background:#e6ffe6;">Some book</ins></td>[\\n\s]+'
r"<td></td>[\\n\s]+"
r"<td></td>[\\n\s]+"
"</tr>"
)
self.assertRegex(response.content.decode(), target_row_re)
|
ConfirmImportPreviewOrderTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.