repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
xinruobingqing/robotChat | test.py | 1 | 5397 | # -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
# Shared TF1-style session reused by all test functions in this script.
sess = tf.Session()
def test1():
    """Run the classic hello-world constant through the shared session."""
    greeting = tf.constant('Hello, TensorFlow!')
    print(sess.run(greeting))
def test2():
    """Add two integer constant tensors and print the sum (42)."""
    lhs = tf.constant(10)
    rhs = tf.constant(32)
    print(sess.run(lhs + rhs))
def test3():
    """Add two float constants and run the graph in a dedicated session.

    Fix: the original created a local ``tf.Session()`` and never closed it,
    leaking the session's resources. A ``with`` block releases them.
    """
    node1 = tf.constant(3.0, dtype=tf.float32)
    node2 = tf.constant(4.0)  # dtype inferred as tf.float32
    node3 = tf.add(node1, node2)
    print("node3==", node3)
    with tf.Session() as local_sess:
        print(local_sess.run(node3))
def test4():
    """Demonstrate placeholders; note `c` is a plain Python tuple."""
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    adder_node = a + b
    add_and_triple = adder_node * 3
    # NOTE: this builds a (tensor, feed-dict) tuple, not a graph op;
    # sess.run(c) would therefore fail.
    c = add_and_triple, {a: 3, b: 4}
    print(c)
def test5():
    """Evaluate linear-model loss, then again after fixing w and b by hand."""
    w = tf.Variable([0.3], dtype=tf.float32)
    b = tf.Variable([-0.3], dtype=tf.float32)
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    linear_model = w * x + b
    loss = tf.reduce_sum(tf.square(linear_model - y))
    sess.run(tf.global_variables_initializer())
    feed = {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}
    print(sess.run(loss, feed))
    # Assign the known-optimal parameters and re-evaluate the loss.
    fixw = tf.assign(w, [-1])
    fixb = tf.assign(b, [1])
    sess.run([fixw, fixb])
    print(sess.run(loss, feed))
def test6():
    """Fit w and b with 1000 gradient-descent steps and print the result.

    Fix: the original ran the variable-initializer op twice (before and
    after creating the optimizer); one run after building the train op is
    sufficient since GradientDescentOptimizer creates no slot variables.
    """
    w = tf.Variable([0.3], dtype=tf.float32)
    b = tf.Variable([-0.3], dtype=tf.float32)
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    linear_model = w * x + b
    loss = tf.reduce_sum(tf.square(linear_model - y))
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = optimizer.minimize(loss)
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
    print(sess.run([w, b]))
def test7():
    """Full linear-regression training loop with a final parameter report."""
    w = tf.Variable([0.3], dtype=tf.float32)
    b = tf.Variable([-0.3], dtype=tf.float32)
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    model = w * x + b
    loss = tf.reduce_sum(tf.square(model - y))
    train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    xs = [1, 2, 3, 4]
    ys = [0, -1, -2, -3]
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train, {x: xs, y: ys})
    currw, currb, curr_loss = sess.run([w, b, loss], {x: xs, y: ys})
    print("w: %s, b: %s, loss: %s" % (currw, currb, curr_loss))
def test8():
    """Train and evaluate a canned LinearRegressor via tf.contrib.learn."""
    features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
    estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
    x_train = np.array([1., 2., 3., 4.])
    y_train = np.array([0., -1., -2., -3.])
    x_eval = np.array([2., 5., 8., 1.])
    y_eval = np.array([-1.01, -4.1, -7., 0.])
    train_fn = tf.contrib.learn.io.numpy_input_fn(
        {"x": x_train}, y_train, batch_size=4, num_epochs=1000)
    eval_fn = tf.contrib.learn.io.numpy_input_fn(
        {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000)
    estimator.fit(input_fn=train_fn, steps=1000)
    train_loss = estimator.evaluate(input_fn=train_fn)
    eval_loss = estimator.evaluate(input_fn=eval_fn)
    print("train loss: %r" % train_loss)
    print("eval loss: %r" % eval_loss)
def test9():
    """Near-duplicate of test8 (only the y_eval literal -7 differs)."""
    features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
    estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
    x_train = np.array([1., 2., 3., 4.])
    y_train = np.array([0., -1., -2., -3.])
    x_eval = np.array([2., 5., 8., 1.])
    y_eval = np.array([-1.01, -4.1, -7, 0.])
    train_fn = tf.contrib.learn.io.numpy_input_fn(
        {"x": x_train}, y_train, batch_size=4, num_epochs=1000)
    eval_fn = tf.contrib.learn.io.numpy_input_fn(
        {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000)
    estimator.fit(input_fn=train_fn, steps=1000)
    train_loss = estimator.evaluate(input_fn=train_fn)
    eval_loss = estimator.evaluate(input_fn=eval_fn)
    print("train loss: %r" % train_loss)
    print("eval loss: %r" % eval_loss)
def test10():
    """Custom model_fn Estimator reproducing the linear regressor."""
    def model(features, labels, mode):
        # Hand-declared parameters of y = w*x + b (float64 like the inputs).
        w = tf.get_variable("w", [1], dtype=tf.float64)
        b = tf.get_variable("b", [1], dtype=tf.float64)
        y = w * features['x'] + b
        loss = tf.reduce_sum(tf.square(y - labels))
        global_step = tf.train.get_global_step()
        optimizer = tf.train.GradientDescentOptimizer(0.01)
        # Bump the global step together with the parameter update.
        train = tf.group(optimizer.minimize(loss),
                         tf.assign_add(global_step, 1))
        return tf.contrib.learn.ModelFnOps(
            mode=mode, predictions=y, loss=loss, train_op=train)

    estimator = tf.contrib.learn.Estimator(model_fn=model)
    x_train = np.array([1., 2., 3., 4.])
    y_train = np.array([0., -1., -2., -3.])
    x_eval = np.array([2., 5., 8., 1.])
    y_eval = np.array([-1.01, -4.1, -7, 0.])
    train_fn = tf.contrib.learn.io.numpy_input_fn(
        {"x": x_train}, y_train, 4, num_epochs=1000)
    eval_fn = tf.contrib.learn.io.numpy_input_fn(
        {"x": x_eval}, y_eval, 4, num_epochs=1000)
    estimator.fit(input_fn=train_fn, steps=1000)
    train_loss = estimator.evaluate(input_fn=train_fn)
    eval_loss = estimator.evaluate(input_fn=eval_fn)
    print("train loss: %r" % train_loss)
    print("eval loss: %r" % eval_loss)
if __name__ == '__main__':
    # Only the last example runs by default; call the others manually.
    test10()
mozman/ezdxf | tests/test_08_addons/test_814_text2path.py | 1 | 14273 | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import pytest
pytest.importorskip("matplotlib") # requires matplotlib!
from matplotlib.font_manager import FontProperties, findfont
from ezdxf.tools.fonts import FontFace
from ezdxf.addons import text2path
from ezdxf.path import Path
from ezdxf import path, bbox
from ezdxf.entities import Text, Hatch
from ezdxf.layouts import VirtualLayout
NOTO_SANS_SC = "Noto Sans SC"
# True when matplotlib falls back to a different font family, i.e.
# "Noto Sans SC" is not installed; used to skip the CJK glyph tests.
noto_sans_sc_not_found = "Noto" not in findfont(
FontProperties(family=NOTO_SANS_SC)
)
def _to_paths(s, f="Arial"):
    """Render string *s* to a list of Path objects using font family *f*."""
    face = FontFace(family=f)
    return text2path.make_paths_from_str(s, font=face)
@pytest.mark.parametrize(
    "s,c",
    [
        ["1", 1], ["2", 1], [".", 1],
        ["0", 2], ["a", 2], ["!", 2], ["@", 2],
        ["8", 3], ["ü", 3], ["&", 3],
        ["ä", 4], ["ö", 4],
        ["%", 5],
    ],
)
def test_make_paths_from_str(s, c):
    """Each glyph decomposes into the expected number of paths."""
    assert len(_to_paths(s)) == c
@pytest.mark.skipif(
    noto_sans_sc_not_found, reason=f'Font "{NOTO_SANS_SC}" not found'
)
@pytest.mark.parametrize("s,c", [["中", 3], ["国", 4], ["文", 3], ["字", 2]])
def test_chinese_char_paths_from_str(s, c):
    """CJK glyphs also decompose into the expected number of paths."""
    paths = _to_paths(s, f=NOTO_SANS_SC)
    assert len(paths) == c
def contour_and_holes(group):
    """Split a path group into (outer contour, remaining holes)."""
    outer = group[0]
    holes = group[1:]
    return outer, holes
@pytest.mark.parametrize(
    "s,h",
    [
        ["1", 0], ["2", 0], [".", 0],
        ["0", 1], ["a", 1],
        ["8", 2],
    ],
)
def test_group_one_contour_with_holes(s, h):
    """Grouping yields one Path contour carrying *h* holes for these glyphs."""
    grouped = list(path.group_paths(_to_paths(s)))
    contour, holes = contour_and_holes(grouped[0])
    assert isinstance(contour, Path)
    assert len(holes) == h
@pytest.mark.parametrize("s", [":", "!", ";", "="])
def test_group_two_contours_without_holes(s):
    """Two-part glyphs group into two contours, the first without holes."""
    grouped = list(path.group_paths(_to_paths(s)))
    assert len(grouped) == 2
    contour, holes = contour_and_holes(grouped[0])
    assert isinstance(contour, Path)
    assert len(holes) == 0
@pytest.mark.parametrize("s", ["Ü", "ö", "ä"])
def test_group_three_contours_and_ignore_holes(s):
    """Umlaut glyphs split into three contour groups; holes are not checked."""
    grouped = list(path.group_paths(_to_paths(s)))
    assert len(grouped) == 3
    contour, _holes = contour_and_holes(grouped[0])
    assert isinstance(contour, Path)
def test_group_percent_sign():
    """'%' groups into two contours; the first carries two holes.

    Special case %: lower o is inside of the slash bounding box, but HATCH
    creation works as expected!
    """
    grouped = list(path.group_paths(_to_paths("%")))
    assert len(grouped) == 2
    contour, holes = contour_and_holes(grouped[0])
    assert isinstance(contour, Path)
    assert len(holes) == 2
@pytest.mark.skipif(
    noto_sans_sc_not_found, reason=f'Font "{NOTO_SANS_SC}" not found'
)
@pytest.mark.parametrize("s,c", [["中", 1], ["国", 1], ["文", 2], ["字", 2]])
def test_group_chinese_chars_and_ignore_holes(s, c):
    """CJK glyphs group into *c* contour groups; holes are not checked.

    Fix: derive the skip reason from NOTO_SANS_SC instead of repeating the
    font name as a hard-coded literal, matching the sibling skipif above.
    """
    paths = _to_paths(s, f=NOTO_SANS_SC)
    result = list(path.group_paths(paths))
    assert len(result) == c
    contour, holes = contour_and_holes(result[0])
    assert isinstance(contour, Path)
@pytest.fixture(scope="module")
def ff():
    """Module-wide Arial FontFace shared by the sizing tests below."""
    face = FontFace(family="Arial")
    return face
class TestMakePathFromString:
    """Sizing and alignment behavior of make_paths_from_str()."""

    # Surprise - even 0 and negative values work without any exceptions!
    @pytest.mark.parametrize("size", [0, 0.05, 1, 2, 100, -1, -2, -100])
    def test_text_path_height_for_exact_drawing_units(self, size, ff):
        box = path.bbox(text2path.make_paths_from_str("X", font=ff, size=size))
        assert box.size.y == pytest.approx(abs(size))

    @pytest.mark.parametrize("size", [0.05, 1, 2, 100])
    def test_path_coordinates_for_positive_size(self, size, ff):
        box = path.bbox(text2path.make_paths_from_str("X", font=ff, size=size))
        assert box.extmax.y == pytest.approx(size)
        assert box.extmin.y == pytest.approx(0)

    @pytest.mark.parametrize("size", [-0.05, -1, -2, -100])
    def test_path_coordinates_for_negative_size(self, size, ff):
        # Negative text height mirrors text about the x-axis!
        box = path.bbox(text2path.make_paths_from_str("X", font=ff, size=size))
        assert box.extmax.y == pytest.approx(0)
        assert box.extmin.y == pytest.approx(size)

    @pytest.mark.parametrize("size", [0.05, 1, 2, 100])
    def test_length_for_fit_alignment(self, size, ff):
        length = 3
        box = path.bbox(
            text2path.make_paths_from_str(
                "XXX", font=ff, size=size, align="FIT", length=length
            )
        )
        assert box.size.x == pytest.approx(length), "expect exact length"
        assert box.size.y == pytest.approx(
            size
        ), "text height should be unscaled"

    @pytest.mark.parametrize("size", [0.05, 1, 2, 100])
    def test_scaled_height_and_length_for_aligned_text(self, size, ff):
        length = 3
        # Reference box from an unconstrained LEFT-aligned rendering.
        reference = path.bbox(
            text2path.make_paths_from_str("XXX", font=ff, size=size, align="LEFT")
        )
        box = path.bbox(
            text2path.make_paths_from_str(
                "XXX", font=ff, size=size, align="ALIGNED", length=length
            )
        )
        scale = box.size.x / reference.size.x
        assert box.size.x == pytest.approx(length), "expect exact length"
        assert box.size.y == pytest.approx(
            size * scale
        ), "text height should be scaled"

    def test_paths_from_empty_string(self, ff):
        assert len(text2path.make_paths_from_str("", font=ff)) == 0

    def test_make_multi_path_object(self, ff):
        multi = text2path.make_path_from_str("ABC", font=ff)
        assert multi.has_sub_paths is True
        assert len(list(multi.sub_paths())) == 6

    def test_make_empty_multi_path_object(self, ff):
        empty = text2path.make_path_from_str("", font=ff)
        assert empty.has_sub_paths is False
        assert len(empty) == 0
class TestMakeHatchesFromString:
    """HATCH generation from strings via make_hatches_from_str()."""

    def test_hatches_from_empty_string(self, ff):
        assert len(text2path.make_hatches_from_str("", font=ff)) == 0

    def test_make_exterior_only_hatches(self, ff):
        produced = text2path.make_hatches_from_str("XXX", font=ff)
        assert len(produced) == 3
        assert len(produced[0].paths) == 1

    def test_make_hatches_with_holes(self, ff):
        produced = text2path.make_hatches_from_str("AAA", font=ff)
        assert len(produced) == 3
        assert len(produced[0].paths) == 2, "expected external and one hole"

    def test_total_length_for_fit_alignment(self, ff):
        length = 3
        produced = text2path.make_hatches_from_str(
            "XXX", font=ff, align="FIT", length=length
        )
        flattened = []
        for hatch in produced:
            flattened.extend(path.from_hatch(hatch))
        box = path.bbox(flattened)
        assert box.size.x == pytest.approx(length), "expect exact length"
        assert box.size.y == pytest.approx(
            1.0
        ), "text height should be unscaled"
def test_check_entity_type():
    """Both None and non-text entities are rejected with TypeError."""
    for entity in (None, Hatch()):
        with pytest.raises(TypeError):
            text2path.check_entity_type(entity)
def make_text(text, location, alignment, height=1.0, rotation=0):
    """Build a TEXT entity positioned at *location* with *alignment*."""
    entity = Text.new(
        dxfattribs={
            "text": text,
            "height": height,
            "rotation": rotation,
        }
    )
    entity.set_pos(location, align=alignment)
    return entity
def get_path_bbox(text):
    """Bounding box of the single multi-path built from *text*."""
    multi_path = text2path.make_path_from_entity(text)
    return path.bbox([multi_path], flatten=0)
def get_paths_bbox(text):
    """Bounding box of the list of paths built from *text*."""
    path_list = text2path.make_paths_from_entity(text)
    return path.bbox(path_list, flatten=0)
def get_hatches_bbox(text):
    """Bounding box of the HATCH entities built from *text*."""
    hatch_list = text2path.make_hatches_from_entity(text)
    return bbox.extents(hatch_list, flatten=0)
@pytest.fixture(params=[get_path_bbox, get_paths_bbox, get_hatches_bbox])
def get_bbox(request):
    """Parametrized bbox strategy: single path, path list, or hatches."""
    return request.param
class TestMakePathsFromEntity:
    """Test Paths (and Hatches) from TEXT entities.

    make_hatches_from_entity() is basically make_paths_from_entity(), but
    returns Hatch entities instead of Path objects.

    Important: Don't use text with top or bottom curves for testing ("S", "O").
    The Path bounding box calculation uses the "fast" method by checking only
    the curve control points, which are outside the curve borders.
    """

    @pytest.mark.parametrize(
        "builder, type_",
        [
            (text2path.make_paths_from_entity, Path),
            (text2path.make_hatches_from_entity, Hatch),
        ],
    )
    def test_text_returns_correct_types(self, builder, type_):
        entity = make_text("TEXT", (0, 0), "LEFT")
        produced = builder(entity)
        assert len(produced) == 4
        assert isinstance(produced[0], type_)

    def test_text_height(self, get_bbox):
        box = get_bbox(make_text("TEXT", (0, 0), "LEFT", height=1.5))
        assert box.size.y == pytest.approx(1.5)

    def test_alignment_left(self, get_bbox):
        box = get_bbox(make_text("TEXT", (7, 7), "LEFT"))
        # font rendering is tricky, base offsets depend on the rendering engine
        # and on extended font metrics, ...
        assert box.extmin.x == pytest.approx(7, abs=0.1)

    def test_alignment_center(self, get_bbox):
        box = get_bbox(make_text("TEXT", (7, 7), "CENTER"))
        assert box.center.x == pytest.approx(7)

    def test_alignment_right(self, get_bbox):
        box = get_bbox(make_text("TEXT", (7, 7), "RIGHT"))
        assert box.extmax.x == pytest.approx(7)

    def test_alignment_baseline(self, get_bbox):
        box = get_bbox(make_text("TEXT", (7, 7), "CENTER"))
        assert box.extmin.y == pytest.approx(7)

    def test_alignment_bottom(self, get_bbox):
        box = get_bbox(make_text("j", (7, 7), "BOTTOM_CENTER"))
        # bottom border of descender should be 7, but ...
        assert box.extmin.y == pytest.approx(7, abs=0.1)

    def test_alignment_middle(self, get_bbox):
        box = get_bbox(make_text("X", (7, 7), "MIDDLE_CENTER"))
        assert box.center.y == pytest.approx(7)

    def test_alignment_top(self, get_bbox):
        box = get_bbox(make_text("X", (7, 7), "TOP_CENTER"))
        assert box.extmax.y == pytest.approx(7)

    def test_alignment_fit(self, get_bbox):
        length, height = 2, 1
        entity = make_text("TEXT", (0, 0), "LEFT", height=height)
        entity.set_pos((1, 0), (1 + length, 0), "FIT")
        box = get_bbox(entity)
        assert (
            box.size.x == length
        ), "expected text length fits into given length"
        assert box.size.y == height, "expected unscaled text height"
        assert box.extmin.isclose((1, 0))

    def test_alignment_aligned(self, get_bbox):
        length, height = 2, 1
        entity = make_text("TEXT", (0, 0), "CENTER", height=height)
        box = get_bbox(entity)
        ratio = box.size.x / box.size.y
        entity.set_pos((1, 0), (1 + length, 0), "ALIGNED")
        box = get_bbox(entity)
        assert (
            box.size.x == length
        ), "expected text length fits into given length"
        assert box.size.y != height, "expected scaled text height"
        assert box.extmin.isclose((1, 0))
        assert box.size.x / box.size.y == pytest.approx(
            ratio
        ), "expected same width/height ratio"

    def test_rotation_90(self, get_bbox):
        # Horizontal reference measurements:
        box_hor = get_bbox(make_text("TEXT", (7, 7), "MIDDLE_CENTER"))
        box_vert = get_bbox(
            make_text("TEXT", (7, 7), "MIDDLE_CENTER", rotation=90)
        )
        assert box_hor.center == box_vert.center
        assert box_hor.size.x == box_vert.size.y
        assert box_hor.size.y == box_vert.size.x
# Shorthand for the entity-kind flags consumed by virtual_entities()/explode().
Kind = text2path.Kind
class TestVirtualEntities:
    """Entity kinds produced by text2path.virtual_entities()."""

    @pytest.fixture
    def text(self):
        return make_text("TEST", (0, 0), "LEFT")

    @staticmethod
    def _dxf_types(entities):
        """Collect the set of DXF type names of *entities*."""
        return {entity.dxftype() for entity in entities}

    def test_virtual_entities_as_hatches(self, text):
        produced = text2path.virtual_entities(text, kind=Kind.HATCHES)
        assert self._dxf_types(produced) == {"HATCH"}

    def test_virtual_entities_as_splines_and_polylines(self, text):
        produced = text2path.virtual_entities(text, kind=Kind.SPLINES)
        assert self._dxf_types(produced) == {"SPLINE", "POLYLINE"}

    def test_virtual_entities_as_lwpolylines(self, text):
        produced = text2path.virtual_entities(text, kind=Kind.LWPOLYLINES)
        assert self._dxf_types(produced) == {"LWPOLYLINE"}

    def test_virtual_entities_to_all_types_at_once(self, text):
        produced = text2path.virtual_entities(
            text, kind=Kind.HATCHES + Kind.SPLINES + Kind.LWPOLYLINES
        )
        assert self._dxf_types(produced) == {
            "LWPOLYLINE", "SPLINE", "POLYLINE", "HATCH"
        }
class TestExplode:
    """Based on text2path.virtual_entities() function, see test above."""

    @pytest.fixture
    def text(self):
        return make_text("TEST", (0, 0), "LEFT")

    def test_source_entity_is_destroyed(self, text):
        assert text.is_alive is True
        # NOTE(review): kind=4 is a raw flag value — presumably one of the
        # Kind.* constants; confirm against the enum before replacing it.
        text2path.explode(text, kind=4)
        assert (
            text.is_alive is False
        ), "source entity should always be destroyed"

    def test_explode_entity_into_layout(self, text):
        layout = VirtualLayout()
        produced = text2path.explode(text, kind=Kind.LWPOLYLINES, target=layout)
        assert len(produced) == len(
            layout
        ), "expected all entities added to the target layout"

    def test_explode_entity_into_the_void(self, text):
        assert (
            text.get_layout() is None
        ), "source entity should not have a layout"
        produced = text2path.explode(text, kind=Kind.LWPOLYLINES, target=None)
        assert len(produced) == 4, "explode should work without a target layout"
if __name__ == "__main__":
    # Allow running this suite directly without a pytest launcher.
    pytest.main([__file__])
| mit |
lmorchard/gitshipped | vendor-local/lib/python/requests/packages/charade/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
# Typical distribution ratio for JIS text, 25% of the ideal ratio 12.58
# computed in the header comment above.
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
# Number of entries in the JISCharToFreqOrder table defined below.
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| bsd-3-clause |
papouso/odoo | addons/crm/base_partner_merge.py | 182 | 30903 | #!/usr/bin/env python
from __future__ import absolute_import
from email.utils import parseaddr
import functools
import htmlentitydefs
import itertools
import logging
import operator
import psycopg2
import re
from ast import literal_eval
from openerp.tools import mute_logger
# Validation Library https://pypi.python.org/pypi/validate_email/1.1
from .validate_email import validate_email
import openerp
from openerp.osv import osv, orm
from openerp.osv import fields
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
# Matches a named HTML entity such as "&amp;"; group 1 is the bare entity name.
pattern = re.compile("&(\w+?);")
# Module logger; named after the wizard rather than __name__ (historical).
_logger = logging.getLogger('base.partner.merge')
# http://www.php2python.com/wiki/function.html-entity-decode/
def html_entity_decode_char(m, defs=htmlentitydefs.entitydefs):
    """Map a regex match of a named HTML entity to its literal character.

    Unknown entity names are returned unchanged (the whole "&name;" text).
    """
    entity = m.group(1)
    if entity in defs:
        return defs[entity]
    return m.group(0)
def html_entity_decode(string):
    """Replace every named HTML entity in *string* with its literal character."""
    return pattern.sub(html_entity_decode_char, string)
def sanitize_email(email):
    """Split a raw, possibly HTML-encoded recipient string into a list of
    lower-cased addresses that pass validate_email().

    ';', '/' and ':' are treated as extra separators before splitting on ','.
    """
    assert isinstance(email, basestring) and email
    result = re.subn(r';|/|:', ',',
                     html_entity_decode(email or ''))[0].split(',')
    # parseaddr extracts the bare address out of "Name <addr>" forms.
    emails = [parseaddr(email)[1]
              for item in result
              for email in item.split()]
    return [email.lower()
            for email in emails
            if validate_email(email)]
def is_integer_list(ids):
    # True when every element is an integer (int or Python 2 long).
    return all(isinstance(i, (int, long)) for i in ids)
class ResPartner(osv.Model):
    """Expose technical fields read-only so the merge wizard can show them."""
    _inherit = 'res.partner'
    _columns = {
        'id': fields.integer('Id', readonly=True),
        'create_date': fields.datetime('Create Date', readonly=True),
    }
class MergePartnerLine(osv.TransientModel):
    """One candidate group of duplicate partners produced by the wizard."""
    _name = 'base.partner.merge.line'
    _columns = {
        'wizard_id': fields.many2one('base.partner.merge.automatic.wizard',
                                     'Wizard'),
        # Smallest partner id of the group (used for ordering).
        'min_id': fields.integer('MinID'),
        # Textual (list-literal) representation of the duplicate partner ids.
        'aggr_ids': fields.char('Ids', required=True),
    }
    _order = 'min_id asc'
class MergePartnerAutomatic(osv.TransientModel):
    """
    The idea behind this wizard is to create a list of potential partners to
    merge. We use two objects, the first one is the wizard for the end-user.
    And the second will contain the partner list to merge.
    """
    _name = 'base.partner.merge.automatic.wizard'
    _columns = {
        # Group by
        'group_by_email': fields.boolean('Email'),
        'group_by_name': fields.boolean('Name'),
        'group_by_is_company': fields.boolean('Is Company'),
        'group_by_vat': fields.boolean('VAT'),
        'group_by_parent_id': fields.boolean('Parent Company'),
        # Wizard screen: choose options -> pick groups -> done.
        'state': fields.selection([('option', 'Option'),
                                   ('selection', 'Selection'),
                                   ('finished', 'Finished')],
                                  'State',
                                  readonly=True,
                                  required=True),
        'number_group': fields.integer("Group of Contacts", readonly=True),
        'current_line_id': fields.many2one('base.partner.merge.line', 'Current Line'),
        'line_ids': fields.one2many('base.partner.merge.line', 'wizard_id', 'Lines'),
        'partner_ids': fields.many2many('res.partner', string='Contacts'),
        'dst_partner_id': fields.many2one('res.partner', string='Destination Contact'),
        'exclude_contact': fields.boolean('A user associated to the contact'),
        'exclude_journal_item': fields.boolean('Journal Items associated to the contact'),
        'maximum_group': fields.integer("Maximum of Group of Contacts"),
    }

    def default_get(self, cr, uid, fields, context=None):
        # When launched from a partner list view, pre-select the active
        # partners and jump straight to the 'selection' screen.
        if context is None:
            context = {}
        res = super(MergePartnerAutomatic, self).default_get(cr, uid, fields, context)
        if context.get('active_model') == 'res.partner' and context.get('active_ids'):
            partner_ids = context['active_ids']
            res['state'] = 'selection'
            res['partner_ids'] = partner_ids
            # Default destination: last partner per _get_ordered_partner().
            res['dst_partner_id'] = self._get_ordered_partner(cr, uid, partner_ids, context=context)[-1].id
        return res

    _defaults = {
        'state': 'option'
    }
    def get_fk_on(self, cr, table):
        """Run a catalog query listing every (table, column) holding a
        single-column foreign key that points at *table*; results are left
        on the cursor for the caller to fetch."""
        q = """ SELECT cl1.relname as table,
                       att1.attname as column
                FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                     pg_attribute as att1, pg_attribute as att2
                WHERE con.conrelid = cl1.oid
                    AND con.confrelid = cl2.oid
                    AND array_lower(con.conkey, 1) = 1
                    AND con.conkey[1] = att1.attnum
                    AND att1.attrelid = cl1.oid
                    AND cl2.relname = %s
                    AND att2.attname = 'id'
                    AND array_lower(con.confkey, 1) = 1
                    AND con.confkey[1] = att2.attnum
                    AND att2.attrelid = cl2.oid
                    AND con.contype = 'f'
        """
        return cr.execute(q, (table,))
    def _update_foreign_keys(self, cr, uid, src_partners, dst_partner, context=None):
        """Repoint every foreign key referencing a source partner so that it
        references *dst_partner* instead, working at the SQL level."""
        _logger.debug('_update_foreign_keys for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))
        # find the many2one relation to a partner
        proxy = self.pool.get('res.partner')
        self.get_fk_on(cr, 'res_partner')
        # ignore two tables
        for table, column in cr.fetchall():
            # Skip the wizard's own tables.
            if 'base_partner_merge_' in table:
                continue
            partner_ids = tuple(map(int, src_partners))
            # NOTE(review): the table name is %-interpolated into the SQL; it
            # comes from pg_catalog, but parameterizing would be safer.
            query = "SELECT column_name FROM information_schema.columns WHERE table_name LIKE '%s'" % (table)
            cr.execute(query, ())
            columns = []
            for data in cr.fetchall():
                if data[0] != column:
                    columns.append(data[0])
            query_dic = {
                'table': table,
                'column': column,
                'value': columns[0],
            }
            if len(columns) <= 1:
                # unique key treated
                query = """
                    UPDATE "%(table)s" as ___tu
                    SET %(column)s = %%s
                    WHERE
                        %(column)s = %%s AND
                        NOT EXISTS (
                            SELECT 1
                            FROM "%(table)s" as ___tw
                            WHERE
                                %(column)s = %%s AND
                                ___tu.%(value)s = ___tw.%(value)s
                        )""" % query_dic
                for partner_id in partner_ids:
                    cr.execute(query, (dst_partner.id, partner_id, dst_partner.id))
            else:
                try:
                    with mute_logger('openerp.sql_db'), cr.savepoint():
                        query = 'UPDATE "%(table)s" SET %(column)s = %%s WHERE %(column)s IN %%s' % query_dic
                        cr.execute(query, (dst_partner.id, partner_ids,))
                        # Clear any parent/child cycle the update may create.
                        if column == proxy._parent_name and table == 'res_partner':
                            query = """
                                WITH RECURSIVE cycle(id, parent_id) AS (
                                    SELECT id, parent_id FROM res_partner
                                    UNION
                                    SELECT cycle.id, res_partner.parent_id
                                    FROM res_partner, cycle
                                    WHERE res_partner.id = cycle.parent_id AND
                                          cycle.id != cycle.parent_id
                                )
                                SELECT id FROM cycle WHERE id = parent_id AND id = %s
                            """
                            cr.execute(query, (dst_partner.id,))
                except psycopg2.Error:
                    # updating fails, most likely due to a violated unique constraint
                    # keeping record with nonexistent partner_id is useless, better delete it
                    query = 'DELETE FROM %(table)s WHERE %(column)s = %%s' % query_dic
                    # NOTE(review): `partner_id` is not bound on this branch
                    # (it is only set by the loop in the `if` branch above);
                    # this looks like it should iterate `partner_ids` — confirm.
                    cr.execute(query, (partner_id,))
    def _update_reference_fields(self, cr, uid, src_partners, dst_partner, context=None):
        """Repoint generic (model, res_id) references and 'reference'-type
        fields from the source partners to *dst_partner*."""
        _logger.debug('_update_reference_fields for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))

        def update_records(model, src, field_model='model', field_id='res_id', context=None):
            # Rewrite rows of *model* whose (field_model, field_id) pair points
            # at *src*; fall back to deleting them on database errors.
            proxy = self.pool.get(model)
            if proxy is None:
                return
            domain = [(field_model, '=', 'res.partner'), (field_id, '=', src.id)]
            ids = proxy.search(cr, openerp.SUPERUSER_ID, domain, context=context)
            try:
                with mute_logger('openerp.sql_db'), cr.savepoint():
                    return proxy.write(cr, openerp.SUPERUSER_ID, ids, {field_id: dst_partner.id}, context=context)
            except psycopg2.Error:
                # updating fails, most likely due to a violated unique constraint
                # keeping record with nonexistent partner_id is useless, better delete it
                return proxy.unlink(cr, openerp.SUPERUSER_ID, ids, context=context)

        update_records = functools.partial(update_records, context=context)
        for partner in src_partners:
            update_records('calendar', src=partner, field_model='model_id.model')
            update_records('ir.attachment', src=partner, field_model='res_model')
            update_records('mail.followers', src=partner, field_model='res_model')
            update_records('mail.message', src=partner)
            update_records('marketing.campaign.workitem', src=partner, field_model='object_id.model')
            update_records('ir.model.data', src=partner)
        # Now handle explicit fields of type 'reference' on any model.
        proxy = self.pool['ir.model.fields']
        domain = [('ttype', '=', 'reference')]
        record_ids = proxy.search(cr, openerp.SUPERUSER_ID, domain, context=context)
        for record in proxy.browse(cr, openerp.SUPERUSER_ID, record_ids, context=context):
            try:
                proxy_model = self.pool[record.model]
                column = proxy_model._columns[record.name]
            except KeyError:
                # unknown model or field => skip
                continue
            if isinstance(column, fields.function):
                continue
            for partner in src_partners:
                domain = [
                    (record.name, '=', 'res.partner,%d' % partner.id)
                ]
                model_ids = proxy_model.search(cr, openerp.SUPERUSER_ID, domain, context=context)
                values = {
                    record.name: 'res.partner,%d' % dst_partner.id,
                }
                proxy_model.write(cr, openerp.SUPERUSER_ID, model_ids, values, context=context)
    def _update_values(self, cr, uid, src_partners, dst_partner, context=None):
        """Copy non-relational field values onto *dst_partner*.

        Iteration order is src_partners then dst_partner, so the destination
        keeps its own non-empty values (last writer wins).
        """
        _logger.debug('_update_values for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))
        columns = dst_partner._columns

        def write_serializer(column, item):
            # many2one browse records must be written back as their id.
            if isinstance(item, browse_record):
                return item.id
            else:
                return item

        values = dict()
        for column, field in columns.iteritems():
            if field._type not in ('many2many', 'one2many') and not isinstance(field, fields.function):
                for item in itertools.chain(src_partners, [dst_partner]):
                    if item[column]:
                        values[column] = write_serializer(column, item[column])
        values.pop('id', None)
        # parent_id is written separately so a recursion error does not
        # discard the rest of the values.
        parent_id = values.pop('parent_id', None)
        dst_partner.write(values)
        if parent_id and parent_id != dst_partner.id:
            try:
                dst_partner.write({'parent_id': parent_id})
            except (osv.except_osv, orm.except_orm):
                _logger.info('Skip recursive partner hierarchies for parent_id %s of partner: %s', parent_id, dst_partner.id)
    @mute_logger('openerp.osv.expression', 'openerp.models')
    def _merge(self, cr, uid, partner_ids, dst_partner=None, context=None):
        """Merge *partner_ids* into *dst_partner* (or the best candidate per
        _get_ordered_partner), then delete the source partners."""
        proxy = self.pool.get('res.partner')
        partner_ids = proxy.exists(cr, uid, list(partner_ids), context=context)
        if len(partner_ids) < 2:
            return
        if len(partner_ids) > 3:
            raise osv.except_osv(_('Error'), _("For safety reasons, you cannot merge more than 3 contacts together. You can re-open the wizard several times if needed."))
        # Non-admin users may only merge partners sharing one email address.
        if openerp.SUPERUSER_ID != uid and len(set(partner.email for partner in proxy.browse(cr, uid, partner_ids, context=context))) > 1:
            raise osv.except_osv(_('Error'), _("All contacts must have the same email. Only the Administrator can merge contacts with different emails."))
        if dst_partner and dst_partner.id in partner_ids:
            src_partners = proxy.browse(cr, uid, [id for id in partner_ids if id != dst_partner.id], context=context)
        else:
            ordered_partners = self._get_ordered_partner(cr, uid, partner_ids, context)
            dst_partner = ordered_partners[-1]
            src_partners = ordered_partners[:-1]
        _logger.info("dst_partner: %s", dst_partner.id)
        # Non-admins may not merge away partners with existing journal items.
        if openerp.SUPERUSER_ID != uid and self._model_is_installed(cr, uid, 'account.move.line', context=context) and \
                self.pool.get('account.move.line').search(cr, openerp.SUPERUSER_ID, [('partner_id', 'in', [partner.id for partner in src_partners])], context=context):
            raise osv.except_osv(_('Error'), _("Only the destination contact may be linked to existing Journal Items. Please ask the Administrator if you need to merge several contacts linked to existing Journal Items."))
        call_it = lambda function: function(cr, uid, src_partners, dst_partner,
                                            context=context)
        call_it(self._update_foreign_keys)
        call_it(self._update_reference_fields)
        call_it(self._update_values)
        _logger.info('(uid = %s) merged the partners %r with %s', uid, list(map(operator.attrgetter('id'), src_partners)), dst_partner.id)
        dst_partner.message_post(body='%s %s'%(_("Merged with the following partners:"), ", ".join('%s<%s>(ID %s)' % (p.name, p.email or 'n/a', p.id) for p in src_partners)))
        for partner in src_partners:
            partner.unlink()
    def clean_emails(self, cr, uid, context=None):
        """
        Clean the email address of the partner, if there is an email field with
        a mimum of two addresses, the system will create a new partner, with the
        information of the previous one and will copy the new cleaned email into
        the email field.
        """
        context = dict(context or {})
        proxy_model = self.pool['ir.model.fields']
        field_ids = proxy_model.search(cr, uid, [('model', '=', 'res.partner'),
                                                 ('ttype', 'like', '%2many')],
                                       context=context)
        fields = proxy_model.read(cr, uid, field_ids, context=context)
        # Every *2many field is reset to [] on the copies created below.
        reset_fields = dict((field['name'], []) for field in fields)
        proxy_partner = self.pool['res.partner']
        context['active_test'] = False
        ids = proxy_partner.search(cr, uid, [], context=context)
        # NOTE(review): the adjacent string literals 'var' 'partner_id'
        # 'is_company' concatenate into the single name
        # 'varpartner_idis_company' — commas look missing; confirm intent.
        fields = ['name', 'var' 'partner_id' 'is_company', 'email']
        partners = proxy_partner.read(cr, uid, ids, fields, context=context)
        partners.sort(key=operator.itemgetter('id'))
        partners_len = len(partners)
        _logger.info('partner_len: %r', partners_len)
        for idx, partner in enumerate(partners):
            if not partner['email']:
                continue
            percent = (idx / float(partners_len)) * 100.0
            _logger.info('idx: %r', idx)
            _logger.info('percent: %r', percent)
            try:
                emails = sanitize_email(partner['email'])
                head, tail = emails[:1], emails[1:]
                email = head[0] if head else False
                # First clean address stays on the original partner...
                proxy_partner.write(cr, uid, [partner['id']],
                                    {'email': email}, context=context)
                # ...each extra address gets its own copied partner.
                for email in tail:
                    values = dict(reset_fields, email=email)
                    proxy_partner.copy(cr, uid, partner['id'], values,
                                       context=context)
            except Exception:
                _logger.exception("There is a problem with this partner: %r", partner)
                raise
        return True
def close_cb(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
def _generate_query(self, fields, maximum_group=100):
sql_fields = []
for field in fields:
if field in ['email', 'name']:
sql_fields.append('lower(%s)' % field)
elif field in ['vat']:
sql_fields.append("replace(%s, ' ', '')" % field)
else:
sql_fields.append(field)
group_fields = ', '.join(sql_fields)
filters = []
for field in fields:
if field in ['email', 'name', 'vat']:
filters.append((field, 'IS NOT', 'NULL'))
criteria = ' AND '.join('%s %s %s' % (field, operator, value)
for field, operator, value in filters)
text = [
"SELECT min(id), array_agg(id)",
"FROM res_partner",
]
if criteria:
text.append('WHERE %s' % criteria)
text.extend([
"GROUP BY %s" % group_fields,
"HAVING COUNT(*) >= 2",
"ORDER BY min(id)",
])
if maximum_group:
text.extend([
"LIMIT %s" % maximum_group,
])
return ' '.join(text)
    def _compute_selected_groupby(self, this):
        """Return the field names whose 'group_by_<field>' flag is set on the
        wizard record *this*; raise if the user selected none."""
        group_by_str = 'group_by_'
        group_by_len = len(group_by_str)
        # All possible criteria, derived from the wizard's own columns.
        fields = [
            key[group_by_len:]
            for key in self._columns.keys()
            if key.startswith(group_by_str)
        ]
        groups = [
            field
            for field in fields
            if getattr(this, '%s%s' % (group_by_str, field), False)
        ]
        if not groups:
            raise osv.except_osv(_('Error'),
                                 _("You have to specify a filter for your selection"))
        return groups
    def next_cb(self, cr, uid, ids, context=None):
        """
        Don't compute any thing
        """
        context = dict(context or {}, active_test=False)
        this = self.browse(cr, uid, ids[0], context=context)
        # Drop the line that was just processed before moving on.
        if this.current_line_id:
            this.current_line_id.unlink()
        return self._next_screen(cr, uid, this, context)
    def _get_ordered_partner(self, cr, uid, partner_ids, context=None):
        """Order partners so the preferred merge destination comes last:
        active partners after inactive ones, oldest create_date last."""
        partners = self.pool.get('res.partner').browse(cr, uid, list(partner_ids), context=context)
        ordered_partners = sorted(sorted(partners,
                                         key=operator.attrgetter('create_date'), reverse=True),
                                  key=operator.attrgetter('active'), reverse=True)
        return ordered_partners
    def _next_screen(self, cr, uid, this, context=None):
        """Load the next duplicate group into the wizard, or mark it finished,
        and return the action that re-opens the wizard form."""
        this.refresh()
        values = {}
        if this.line_ids:
            # in this case, we try to find the next record.
            current_line = this.line_ids[0]
            current_partner_ids = literal_eval(current_line.aggr_ids)
            values.update({
                'current_line_id': current_line.id,
                # (6, 0, ids) replaces the whole many2many selection.
                'partner_ids': [(6, 0, current_partner_ids)],
                'dst_partner_id': self._get_ordered_partner(cr, uid, current_partner_ids, context)[-1].id,
                'state': 'selection',
            })
        else:
            values.update({
                'current_line_id': False,
                'partner_ids': [],
                'state': 'finished',
            })
        this.write(values)
        return {
            'type': 'ir.actions.act_window',
            'res_model': this._name,
            'res_id': this.id,
            'view_mode': 'form',
            'target': 'new',
        }
def _model_is_installed(self, cr, uid, model, context=None):
proxy = self.pool.get('ir.model')
domain = [('model', '=', model)]
return proxy.search_count(cr, uid, domain, context=context) > 0
def _partner_use_in(self, cr, uid, aggr_ids, models, context=None):
"""
Check if there is no occurence of this group of partner in the selected
model
"""
for model, field in models.iteritems():
proxy = self.pool.get(model)
domain = [(field, 'in', aggr_ids)]
if proxy.search_count(cr, uid, domain, context=context):
return True
return False
    def compute_models(self, cr, uid, ids, context=None):
        """
        Compute the different models needed by the system if you want to exclude
        some partners.
        """
        assert is_integer_list(ids)
        this = self.browse(cr, uid, ids[0], context=context)
        # Maps model name -> partner field used to detect "used" partners.
        models = {}
        if this.exclude_contact:
            models['res.users'] = 'partner_id'
        if self._model_is_installed(cr, uid, 'account.move.line', context=context) and this.exclude_journal_item:
            models['account.move.line'] = 'partner_id'
        return models
    def _process_query(self, cr, uid, ids, query, context=None):
        """
        Execute the select request and write the result in this wizard
        """
        proxy = self.pool.get('base.partner.merge.line')
        this = self.browse(cr, uid, ids[0], context=context)
        models = self.compute_models(cr, uid, ids, context=context)
        cr.execute(query)
        counter = 0
        for min_id, aggr_ids in cr.fetchall():
            # Skip groups that touch excluded models (users / journal items).
            if models and self._partner_use_in(cr, uid, aggr_ids, models, context=context):
                continue
            values = {
                'wizard_id': this.id,
                'min_id': min_id,
                'aggr_ids': aggr_ids,
            }
            proxy.create(cr, uid, values, context=context)
            counter += 1
        values = {
            'state': 'selection',
            'number_group': counter,
        }
        this.write(values)
        _logger.info("counter: %s", counter)
    def start_process_cb(self, cr, uid, ids, context=None):
        """
        Start the process.
        * Compute the selected groups (with duplication)
        * If the user has selected the 'exclude_XXX' fields, avoid the partners.
        """
        assert is_integer_list(ids)
        # active_test=False: include archived partners in the search.
        context = dict(context or {}, active_test=False)
        this = self.browse(cr, uid, ids[0], context=context)
        groups = self._compute_selected_groupby(this)
        query = self._generate_query(groups, this.maximum_group)
        self._process_query(cr, uid, ids, query, context=context)
        return self._next_screen(cr, uid, this, context)
    def automatic_process_cb(self, cr, uid, ids, context=None):
        """Compute the duplicate groups, then merge every group in one pass."""
        assert is_integer_list(ids)
        this = self.browse(cr, uid, ids[0], context=context)
        this.start_process_cb()
        this.refresh()
        for line in this.line_ids:
            partner_ids = literal_eval(line.aggr_ids)
            self._merge(cr, uid, partner_ids, context=context)
            line.unlink()
            # Commit after each group so a later failure keeps prior merges.
            cr.commit()
        this.write({'state': 'finished'})
        return {
            'type': 'ir.actions.act_window',
            'res_model': this._name,
            'res_id': this.id,
            'view_mode': 'form',
            'target': 'new',
        }
    def parent_migration_process_cb(self, cr, uid, ids, context=None):
        """Merge parent/child partners that share the same name and email,
        then clear self-referencing parent_id rows."""
        assert is_integer_list(ids)
        context = dict(context or {}, active_test=False)
        this = self.browse(cr, uid, ids[0], context=context)
        query = """
            SELECT
                min(p1.id),
                array_agg(DISTINCT p1.id)
            FROM
                res_partner as p1
            INNER join
                res_partner as p2
            ON
                p1.email = p2.email AND
                p1.name = p2.name AND
                (p1.parent_id = p2.id OR p1.id = p2.parent_id)
            WHERE
                p2.id IS NOT NULL
            GROUP BY
                p1.email,
                p1.name,
                CASE WHEN p1.parent_id = p2.id THEN p2.id
                    ELSE p1.id
                END
            HAVING COUNT(*) >= 2
            ORDER BY
                min(p1.id)
        """
        self._process_query(cr, uid, ids, query, context=context)
        for line in this.line_ids:
            partner_ids = literal_eval(line.aggr_ids)
            self._merge(cr, uid, partner_ids, context=context)
            line.unlink()
            cr.commit()
        this.write({'state': 'finished'})
        # A merged partner may now be its own parent: detach it.
        cr.execute("""
            UPDATE
                res_partner
            SET
                is_company = NULL,
                parent_id = NULL
            WHERE
                parent_id = id
        """)
        return {
            'type': 'ir.actions.act_window',
            'res_model': this._name,
            'res_id': this.id,
            'view_mode': 'form',
            'target': 'new',
        }
    def update_all_process_cb(self, cr, uid, ids, context=None):
        """Run the parent migration, then a batch of automatic merges, and
        finally clear is_company on partners that have a parent."""
        assert is_integer_list(ids)
        # WITH RECURSIVE cycle(id, parent_id) AS (
        #     SELECT id, parent_id FROM res_partner
        #     UNION
        #     SELECT cycle.id, res_partner.parent_id
        #     FROM res_partner, cycle
        #     WHERE res_partner.id = cycle.parent_id AND
        #         cycle.id != cycle.parent_id
        # )
        # UPDATE res_partner
        # SET parent_id = NULL
        # WHERE id in (SELECT id FROM cycle WHERE id = parent_id);
        this = self.browse(cr, uid, ids[0], context=context)
        # NOTE(review): context=None discards the caller's context here —
        # confirm whether `context` was intended instead.
        self.parent_migration_process_cb(cr, uid, ids, context=None)
        list_merge = [
            {'group_by_vat': True, 'group_by_email': True, 'group_by_name': True},
            # {'group_by_name': True, 'group_by_is_company': True, 'group_by_parent_id': True},
            # {'group_by_email': True, 'group_by_is_company': True, 'group_by_parent_id': True},
            # {'group_by_name': True, 'group_by_vat': True, 'group_by_is_company': True, 'exclude_journal_item': True},
            # {'group_by_email': True, 'group_by_vat': True, 'group_by_is_company': True, 'exclude_journal_item': True},
            # {'group_by_email': True, 'group_by_is_company': True, 'exclude_contact': True, 'exclude_journal_item': True},
            # {'group_by_name': True, 'group_by_is_company': True, 'exclude_contact': True, 'exclude_journal_item': True}
        ]
        # One throwaway wizard per criteria set.
        for merge_value in list_merge:
            id = self.create(cr, uid, merge_value, context=context)
            self.automatic_process_cb(cr, uid, [id], context=context)
        cr.execute("""
            UPDATE
                res_partner
            SET
                is_company = NULL
            WHERE
                parent_id IS NOT NULL AND
                is_company IS NOT NULL
        """)
        # cr.execute("""
        #     UPDATE
        #         res_partner as p1
        #     SET
        #         is_company = NULL,
        #         parent_id = (
        #             SELECT p2.id
        #             FROM res_partner as p2
        #             WHERE p2.email = p1.email AND
        #                   p2.parent_id != p2.id
        #             LIMIT 1
        #         )
        #     WHERE
        #         p1.parent_id = p1.id
        # """)
        return self._next_screen(cr, uid, this, context)
    def merge_cb(self, cr, uid, ids, context=None):
        """Merge the currently selected partners, then show the next group."""
        assert is_integer_list(ids)
        context = dict(context or {}, active_test=False)
        this = self.browse(cr, uid, ids[0], context=context)
        partner_ids = set(map(int, this.partner_ids))
        if not partner_ids:
            # Nothing selected: finish immediately.
            this.write({'state': 'finished'})
            return {
                'type': 'ir.actions.act_window',
                'res_model': this._name,
                'res_id': this.id,
                'view_mode': 'form',
                'target': 'new',
            }
        self._merge(cr, uid, partner_ids, this.dst_partner_id, context=context)
        if this.current_line_id:
            this.current_line_id.unlink()
        return self._next_screen(cr, uid, this, context)
    def auto_set_parent_id(self, cr, uid, ids, context=None):
        """Attach partners sharing an email domain to the graded partner that
        has the most open/paid invoices for that domain."""
        assert is_integer_list(ids)
        # select partner who have one least invoice
        partner_treated = ['@gmail.com']
        cr.execute("""  SELECT p.id, p.email
                        FROM res_partner as p
                        LEFT JOIN account_invoice as a
                        ON p.id = a.partner_id AND a.state in ('open','paid')
                        WHERE p.grade_id is NOT NULL
                        GROUP BY p.id
                        ORDER BY COUNT(a.id) DESC
                """)
        re_email = re.compile(r".*@")
        for id, email in cr.fetchall():
            # check email domain
            email = re_email.sub("@", email or "")
            if not email or email in partner_treated:
                continue
            partner_treated.append(email)
            # don't update the partners if they are more of one who have invoice
            # NOTE(review): the queries below build SQL with %-interpolation
            # (not parameters), and the LIKE pattern '%%%s' expands to
            # '%<email>' — injection-prone and fragile; confirm before reuse.
            cr.execute("""  SELECT *
                            FROM res_partner as p
                            WHERE p.id != %s AND p.email LIKE '%%%s' AND
                                EXISTS (SELECT * FROM account_invoice as a WHERE p.id = a.partner_id AND a.state in ('open','paid'))
                    """ % (id, email))
            if len(cr.fetchall()) > 1:
                _logger.info("%s MORE OF ONE COMPANY", email)
                continue
            # to display changed values
            cr.execute("""  SELECT id,email
                            FROM res_partner
                            WHERE parent_id != %s AND id != %s AND email LIKE '%%%s'
                    """ % (id, id, email))
            _logger.info("%r", cr.fetchall())
            # upgrade
            cr.execute("""  UPDATE res_partner
                            SET parent_id = %s
                            WHERE id != %s AND email LIKE '%%%s'
                    """ % (id, id, email))
        return False
| agpl-3.0 |
webwlsong/mycli | tests/utils.py | 22 | 1357 | import pytest
import pymysql
from mycli.main import format_output, special
from os import getenv
# TODO: should this be somehow be divined from environment?
USER, HOST, PORT, CHARSET = 'root', 'localhost', 3306, 'utf8'
# Taken from the environment; None (the getenv default) means no password.
PASSWORD = getenv('PASSWORD')
def db_connection(dbname=None):
    """Open an autocommitting pymysql connection to the local test server.

    *dbname* selects a database; None connects without one.
    """
    connection = pymysql.connect(user=USER, host=HOST, port=PORT,
                                 database=dbname, password=PASSWORD,
                                 charset=CHARSET)
    connection.autocommit = True
    return connection
# Probe the server once at import time so DB-backed tests can be skipped
# cleanly when no local MySQL instance is available.
try:
    db_connection()
    CAN_CONNECT_TO_DB = True
except Exception:
    # Narrowed from a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit during import; Exception still covers
    # every connection error pymysql can raise.
    CAN_CONNECT_TO_DB = False

# Decorator for tests that require a reachable database.
dbtest = pytest.mark.skipif(
    not CAN_CONNECT_TO_DB,
    reason="Need a mysql instance at localhost accessible by user 'root'")
def create_db(dbname):
    """Drop and recreate the scratch database used by the test suite.

    Best-effort: database errors (e.g. insufficient privileges) are ignored
    so the caller proceeds and fails later with a clearer message.

    NOTE(review): the *dbname* argument is ignored — the SQL hardcodes
    '_test_db'; confirm whether any caller relies on passing another name.
    """
    with db_connection().cursor() as cur:
        try:
            cur.execute('''DROP DATABASE IF EXISTS _test_db''')
            cur.execute('''CREATE DATABASE _test_db''')
        except Exception:
            # Narrowed from a bare `except:` so interpreter-exit exceptions
            # are not silently swallowed; still deliberately best-effort.
            pass
def run(executor, sql, join=False):
    """Run *sql* through *executor* and return its psql-formatted output.

    Returns a list of output lines, or a single newline-joined string when
    *join* is true.
    """
    output = []
    for title, rows, headers, status in executor.run(sql):
        output.extend(format_output(title, rows, headers, status, 'psql'))
    if join:
        return '\n'.join(output)
    return output
def set_expanded_output(is_expanded):
    """ Pass-through for the tests """
    # Thin wrapper so tests can toggle expanded (\G-style) output directly.
    return special.set_expanded_output(is_expanded)
| bsd-3-clause |
labcodes/django | tests/get_or_create/tests.py | 5 | 19815 | import time
import traceback
from datetime import date, datetime, timedelta
from threading import Thread
from django.core.exceptions import FieldError
from django.db import DatabaseError, IntegrityError, connection
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from .models import (
Author, Book, DefaultPerson, ManualPrimaryKeyTest, Person, Profile,
Publisher, Tag, Thing,
)
class GetOrCreateTests(TestCase):
    """get_or_create() on plain managers and on related managers."""

    def setUp(self):
        self.lennon = Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )

    def test_get_or_create_method_with_get(self):
        # Matching row exists (from setUp), so nothing is created.
        created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon", defaults={
                "birthday": date(1940, 10, 9)
            }
        )[1]
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 1)

    def test_get_or_create_method_with_create(self):
        created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )[1]
        self.assertTrue(created)
        self.assertEqual(Person.objects.count(), 2)

    def test_get_or_create_redundant_instance(self):
        """
        If we execute the exact same statement twice, the second time,
        it won't create a Person.
        """
        Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )
        created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )[1]
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 2)

    def test_get_or_create_invalid_params(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        with self.assertRaises(IntegrityError):
            Person.objects.get_or_create(first_name="Tom", last_name="Smith")

    def test_get_or_create_with_pk_property(self):
        """
        Using the pk property of a model is allowed.
        """
        Thing.objects.get_or_create(pk=1)

    def test_get_or_create_on_related_manager(self):
        p = Publisher.objects.create(name="Acme Publishing")
        # Create a book through the publisher.
        book, created = p.books.get_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        # The publisher should have one book.
        self.assertEqual(p.books.count(), 1)
        # Try get_or_create again, this time nothing should be created.
        book, created = p.books.get_or_create(name="The Book of Ed & Fred")
        self.assertFalse(created)
        # And the publisher should still have one book.
        self.assertEqual(p.books.count(), 1)
        # Add an author to the book.
        ed, created = book.authors.get_or_create(name="Ed")
        self.assertTrue(created)
        # The book should have one author.
        self.assertEqual(book.authors.count(), 1)
        # Try get_or_create again, this time nothing should be created.
        ed, created = book.authors.get_or_create(name="Ed")
        self.assertFalse(created)
        # And the book should still have one author.
        self.assertEqual(book.authors.count(), 1)
        # Add a second author to the book.
        fred, created = book.authors.get_or_create(name="Fred")
        self.assertTrue(created)
        # The book should have two authors now.
        self.assertEqual(book.authors.count(), 2)
        # Create an Author not tied to any books.
        Author.objects.create(name="Ted")
        # There should be three Authors in total. The book object should have two.
        self.assertEqual(Author.objects.count(), 3)
        self.assertEqual(book.authors.count(), 2)
        # Try creating a book through an author.
        _, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
        self.assertTrue(created)
        # Now Ed has two Books, Fred just one.
        self.assertEqual(ed.books.count(), 2)
        self.assertEqual(fred.books.count(), 1)
        # Use the publisher's primary key value instead of a model instance.
        _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
        self.assertTrue(created)
        # Try get_or_create again, this time nothing should be created.
        _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
        self.assertFalse(created)
        # The publisher should have three books.
        self.assertEqual(p.books.count(), 3)

    def test_defaults_exact(self):
        """
        If you have a field named defaults and want to use it as an exact
        lookup, you need to use 'defaults__exact'.
        """
        obj, created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertTrue(created)
        self.assertEqual(obj.defaults, 'testing')
        obj2, created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertFalse(created)
        self.assertEqual(obj, obj2)

    def test_callable_defaults(self):
        """
        Callables in `defaults` are evaluated if the instance is created.
        """
        obj, created = Person.objects.get_or_create(
            first_name="George",
            defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)},
        )
        self.assertTrue(created)
        self.assertEqual(date(1943, 2, 25), obj.birthday)

    def test_callable_defaults_not_called(self):
        # The row from setUp matches, so the raising default must be skipped.
        def raise_exception():
            raise AssertionError
        obj, created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon",
            defaults={"birthday": lambda: raise_exception()},
        )
class GetOrCreateTestsWithManualPKs(TestCase):
    """get_or_create() behavior when primary keys are assigned manually."""

    def setUp(self):
        self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original")

    def test_create_with_duplicate_primary_key(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")

    def test_get_or_create_raises_IntegrityError_plus_traceback(self):
        """
        get_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        except IntegrityError:
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)

    def test_savepoint_rollback(self):
        """
        The database connection is still usable after a DatabaseError in
        get_or_create() (#20463).
        """
        Tag.objects.create(text='foo')
        with self.assertRaises(DatabaseError):
            # pk 123456789 doesn't exist, so the tag object will be created.
            # Saving triggers a unique constraint violation on 'text'.
            Tag.objects.get_or_create(pk=123456789, defaults={'text': 'foo'})
        # Tag objects can be created after the error.
        Tag.objects.create(text='bar')

    def test_get_or_create_empty(self):
        """
        If all the attributes on a model have defaults, get_or_create() doesn't
        require any arguments.
        """
        DefaultPerson.objects.get_or_create()
class GetOrCreateTransactionTests(TransactionTestCase):
    """Deferred-constraint behavior needs a real transaction boundary."""
    available_apps = ['get_or_create']

    def test_get_or_create_integrityerror(self):
        """
        Regression test for #15117. Requires a TransactionTestCase on
        databases that delay integrity checks until the end of transactions,
        otherwise the exception is never raised.
        """
        try:
            # The referenced Person(id=1) is unsaved, so the FK is dangling.
            Profile.objects.get_or_create(person=Person(id=1))
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
    """get_or_create() called on a many-to-many related manager."""

    def test_get_get_or_create(self):
        tag = Tag.objects.create(text='foo')
        a_thing = Thing.objects.create(name='a')
        a_thing.tags.add(tag)
        obj, created = a_thing.tags.get_or_create(text='foo')
        self.assertFalse(created)
        self.assertEqual(obj.pk, tag.pk)

    def test_create_get_or_create(self):
        a_thing = Thing.objects.create(name='a')
        obj, created = a_thing.tags.get_or_create(text='foo')
        self.assertTrue(created)
        self.assertEqual(obj.text, 'foo')
        self.assertIn(obj, a_thing.tags.all())

    def test_something(self):
        # The tag exists but is not linked to a_thing, so the related-manager
        # get misses and the create violates the unique 'text' constraint.
        Tag.objects.create(text='foo')
        a_thing = Thing.objects.create(name='a')
        with self.assertRaises(IntegrityError):
            a_thing.tags.get_or_create(text='foo')
class UpdateOrCreateTests(TestCase):
    """
    Tests for QuerySet.update_or_create(): the lookup kwargs select the row,
    the 'defaults' dict supplies the values used to create or update it.
    """
    def test_update(self):
        Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertFalse(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        # The existing row was updated with the 'defaults' value.
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create(self):
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertTrue(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create_twice(self):
        params = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': date(1940, 10, 10),
        }
        Person.objects.update_or_create(**params)
        # If we execute the exact same statement, it won't create a Person.
        p, created = Person.objects.update_or_create(**params)
        self.assertFalse(created)
    def test_integrity(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        with self.assertRaises(IntegrityError):
            Person.objects.update_or_create(first_name="Tom", last_name="Smith")
    def test_manual_primary_key_test(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        ManualPrimaryKeyTest.objects.create(id=1, data="Original")
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
    def test_with_pk_property(self):
        """
        Using the pk property of a model is allowed.
        """
        Thing.objects.update_or_create(pk=1)
    def test_error_contains_full_traceback(self):
        """
        update_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        # NOTE(review): no conflicting row is created first, so this call may
        # simply create the object and the except branch never runs, making
        # the test pass vacuously -- verify against the get_or_create variant.
        try:
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        except IntegrityError:
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)
    def test_create_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book, created = p.books.update_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        self.assertEqual(p.books.count(), 1)
    def test_update_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        self.assertEqual(p.books.count(), 1)
        name = "The Book of Django"
        book, created = p.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(p.books.count(), 1)
    def test_create_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p)
        self.assertTrue(created)
        self.assertEqual(author.books.count(), 1)
    def test_update_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        book.authors.add(author)
        self.assertEqual(author.books.count(), 1)
        name = "The Book of Django"
        book, created = author.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(author.books.count(), 1)
    def test_defaults_exact(self):
        """
        If you have a field named defaults and want to use it as an exact
        lookup, you need to use 'defaults__exact'.
        """
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertTrue(created)
        self.assertEqual(obj.defaults, 'testing')
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'another testing',
            }
        )
        self.assertFalse(created)
        self.assertEqual(obj.defaults, 'another testing')
    def test_create_callable_default(self):
        # Callables in 'defaults' are invoked, not stored as-is.
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison',
            defaults={'birthday': lambda: date(1943, 2, 25)},
        )
        self.assertIs(created, True)
        self.assertEqual(obj.birthday, date(1943, 2, 25))
    def test_update_callable_default(self):
        Person.objects.update_or_create(
            first_name='George', last_name='Harrison', birthday=date(1942, 2, 25),
        )
        obj, created = Person.objects.update_or_create(
            first_name='George',
            defaults={'last_name': lambda: 'NotHarrison'},
        )
        self.assertIs(created, False)
        self.assertEqual(obj.last_name, 'NotHarrison')
class UpdateOrCreateTransactionTests(TransactionTestCase):
    """
    Locking behaviour of update_or_create(); needs real transactions and
    SELECT ... FOR UPDATE support, hence the TransactionTestCase base.
    """
    available_apps = ['get_or_create']
    @skipUnlessDBFeature('has_select_for_update')
    @skipUnlessDBFeature('supports_transactions')
    def test_updates_in_transaction(self):
        """
        Objects are selected and updated in a transaction to avoid race
        conditions. This test forces update_or_create() to hold the lock
        in another thread for a relatively long time so that it can update
        while it holds the lock. The updated field isn't a field in 'defaults',
        so update_or_create() shouldn't have an effect on it.
        """
        lock_status = {'has_grabbed_lock': False}
        def birthday_sleep():
            # Runs while the row lock is held: flag it, then stall so the
            # main thread can attempt a concurrent UPDATE.
            lock_status['has_grabbed_lock'] = True
            time.sleep(0.5)
            return date(1940, 10, 10)
        def update_birthday_slowly():
            Person.objects.update_or_create(
                first_name='John', defaults={'birthday': birthday_sleep}
            )
            # Avoid leaking connection for Oracle
            connection.close()
        def lock_wait():
            # timeout after ~0.5 seconds
            for i in range(20):
                time.sleep(0.025)
                if lock_status['has_grabbed_lock']:
                    return True
            return False
        Person.objects.create(first_name='John', last_name='Lennon', birthday=date(1940, 10, 9))
        # update_or_create in a separate thread
        t = Thread(target=update_birthday_slowly)
        before_start = datetime.now()
        t.start()
        if not lock_wait():
            self.skipTest('Database took too long to lock the row')
        # Update during lock
        Person.objects.filter(first_name='John').update(last_name='NotLennon')
        after_update = datetime.now()
        # Wait for thread to finish
        t.join()
        # The update remains and it blocked.
        updated_person = Person.objects.get(first_name='John')
        self.assertGreater(after_update - before_start, timedelta(seconds=0.5))
        self.assertEqual(updated_person.last_name, 'NotLennon')
class InvalidCreateArgumentsTests(SimpleTestCase):
    """
    get_or_create()/update_or_create() raise FieldError (with a message that
    names the offending field(s)) when given nonexistent field names, either
    as lookups or inside 'defaults'.
    """
    msg = "Invalid field name(s) for model Thing: 'nonexistent'."
    def test_get_or_create_with_invalid_defaults(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.get_or_create(name='a', defaults={'nonexistent': 'b'})
    def test_get_or_create_with_invalid_kwargs(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.get_or_create(name='a', nonexistent='b')
    def test_update_or_create_with_invalid_defaults(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.update_or_create(name='a', defaults={'nonexistent': 'b'})
    def test_update_or_create_with_invalid_kwargs(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.update_or_create(name='a', nonexistent='b')
    def test_multiple_invalid_fields(self):
        # All bad names are reported together, sorted, in one message.
        with self.assertRaisesMessage(FieldError, "Invalid field name(s) for model Thing: 'invalid', 'nonexistent'"):
            Thing.objects.update_or_create(name='a', nonexistent='b', defaults={'invalid': 'c'})
| bsd-3-clause |
tectronics/mythbox | resources/lib/twisted/twisted/conch/insults/window.py | 78 | 26264 | # -*- test-case-name: twisted.conch.test.test_window -*-
"""
Simple insults-based widget library
@author: Jp Calderone
"""
import array
from twisted.conch.insults import insults, helper
from twisted.python import text as tptext
class YieldFocus(Exception):
    """Input focus manipulation exception.

    Raised by a widget which does not want (or no longer wants) input focus,
    telling its container to offer focus to the next candidate widget.
    """
class BoundedTerminalWrapper(object):
    """
    Proxy for a terminal which translates cursor positions by (xoff, yoff)
    and clamps them to a width x height region, so a child widget can draw
    as if it owned the whole screen.
    """
    def __init__(self, terminal, width, height, xoff, yoff):
        self.width = width
        self.height = height
        self.xoff = xoff
        self.yoff = yoff
        self.terminal = terminal
        # Pass-throughs that need no coordinate translation.
        self.cursorForward = terminal.cursorForward
        self.selectCharacterSet = terminal.selectCharacterSet
        self.selectGraphicRendition = terminal.selectGraphicRendition
        self.saveCursor = terminal.saveCursor
        self.restoreCursor = terminal.restoreCursor
    def cursorPosition(self, x, y):
        # Clamp to the bounded region, then shift into absolute coordinates.
        return self.terminal.cursorPosition(
            self.xoff + min(self.width, x),
            self.yoff + min(self.height, y)
            )
    def cursorHome(self):
        # "Home" for the wrapped region is its top-left corner.
        return self.terminal.cursorPosition(
            self.xoff, self.yoff)
    def write(self, bytes):
        return self.terminal.write(bytes)
class Widget(object):
    """
    Base class for all insults widgets: dirty-tracking, draw/render
    dispatch, and keystroke routing to per-key handler methods.
    """
    focused = False      # True while this widget has input focus
    parent = None        # containing widget, if any
    dirty = False        # True when a repaint has been requested
    width = height = None  # last rendered size; None until first draw()
    def repaint(self):
        # Mark dirty and bubble the request up to the first clean ancestor.
        if not self.dirty:
            self.dirty = True
            if self.parent is not None and not self.parent.dirty:
                self.parent.repaint()
    def filthy(self):
        self.dirty = True
    def redraw(self, width, height, terminal):
        # Force a render regardless of the dirty flag.
        self.filthy()
        self.draw(width, height, terminal)
    def draw(self, width, height, terminal):
        # Render only when the size changed or a repaint was requested.
        if width != self.width or height != self.height or self.dirty:
            self.width = width
            self.height = height
            self.dirty = False
            self.render(width, height, terminal)
    def render(self, width, height, terminal):
        # Subclasses override to emit their content.
        pass
    def sizeHint(self):
        # (width, height) preference, or None for "no preference".
        return None
    def keystrokeReceived(self, keyID, modifier):
        # Route special keys to dedicated handlers; everything else is a
        # plain character.
        if keyID == '\t':
            self.tabReceived(modifier)
        elif keyID == '\x7f':
            self.backspaceReceived()
        elif keyID in insults.FUNCTION_KEYS:
            self.functionKeyReceived(keyID, modifier)
        else:
            self.characterReceived(keyID, modifier)
    def tabReceived(self, modifier):
        # XXX TODO - Handle shift+tab
        raise YieldFocus()
    def focusReceived(self):
        """Called when focus is being given to this widget.
        May raise YieldFocus if this widget does not want focus.
        """
        self.focused = True
        self.repaint()
    def focusLost(self):
        self.focused = False
        self.repaint()
    def backspaceReceived(self):
        pass
    def functionKeyReceived(self, keyID, modifier):
        # Dispatch F-keys/arrows to methods named func_<KEYNAME>, if defined.
        func = getattr(self, 'func_' + keyID.name, None)
        if func is not None:
            func(modifier)
    def characterReceived(self, keyID, modifier):
        pass
class ContainerWidget(Widget):
    """
    @ivar focusedChild: The contained widget which currently has
    focus, or None.
    """
    focusedChild = None
    focused = False
    def __init__(self):
        Widget.__init__(self)
        self.children = []
    def addChild(self, child):
        # Adopt the child; if we are focused but have no focused child yet,
        # offer it focus (it may decline by raising YieldFocus).
        assert child.parent is None
        child.parent = self
        self.children.append(child)
        if self.focusedChild is None and self.focused:
            try:
                child.focusReceived()
            except YieldFocus:
                pass
            else:
                self.focusedChild = child
        self.repaint()
    def remChild(self, child):
        assert child.parent is self
        child.parent = None
        self.children.remove(child)
        self.repaint()
    def filthy(self):
        # A container is filthy only if all of its children are too.
        for ch in self.children:
            ch.filthy()
        Widget.filthy(self)
    def render(self, width, height, terminal):
        for ch in self.children:
            ch.draw(width, height, terminal)
    def changeFocus(self):
        # Move focus to the next child after the currently-focused one,
        # skipping children that raise YieldFocus. Raises YieldFocus when
        # no child wants focus, so the parent can continue the search.
        self.repaint()
        if self.focusedChild is not None:
            self.focusedChild.focusLost()
            focusedChild = self.focusedChild
            self.focusedChild = None
            try:
                curFocus = self.children.index(focusedChild) + 1
            except ValueError:
                raise YieldFocus()
        else:
            curFocus = 0
        while curFocus < len(self.children):
            try:
                self.children[curFocus].focusReceived()
            except YieldFocus:
                curFocus += 1
            else:
                self.focusedChild = self.children[curFocus]
                return
        # None of our children wanted focus
        raise YieldFocus()
    def focusReceived(self):
        self.changeFocus()
        self.focused = True
    def keystrokeReceived(self, keyID, modifier):
        # Forward to the focused child; a YieldFocus from it advances focus.
        if self.focusedChild is not None:
            try:
                self.focusedChild.keystrokeReceived(keyID, modifier)
            except YieldFocus:
                self.changeFocus()
                self.repaint()
        else:
            Widget.keystrokeReceived(self, keyID, modifier)
class TopWindow(ContainerWidget):
    """
    A top-level container object which provides focus wrap-around and paint
    scheduling.
    @ivar painter: A no-argument callable which will be invoked when this
    widget needs to be redrawn.
    @ivar scheduler: A one-argument callable which will be invoked with a
    no-argument callable and should arrange for it to invoked at some point in
    the near future. The no-argument callable will cause this widget and all
    its children to be redrawn. It is typically beneficial for the no-argument
    callable to be invoked at the end of handling for whatever event is
    currently active; for example, it might make sense to call it at the end of
    L{twisted.conch.insults.insults.ITerminalProtocol.keystrokeReceived}.
    Note, however, that since calls to this may also be made in response to no
    apparent event, arrangements should be made for the function to be called
    even if an event handler such as C{keystrokeReceived} is not on the call
    stack (eg, using C{reactor.callLater} with a short timeout).
    """
    focused = True
    def __init__(self, painter, scheduler):
        ContainerWidget.__init__(self)
        self.painter = painter
        self.scheduler = scheduler
    # Sentinel marking an already-scheduled paint; None when idle.
    _paintCall = None
    def repaint(self):
        # Schedule at most one pending paint at a time.
        if self._paintCall is None:
            self._paintCall = object()
            self.scheduler(self._paint)
        ContainerWidget.repaint(self)
    def _paint(self):
        self._paintCall = None
        self.painter()
    def changeFocus(self):
        # Wrap around: if no later child takes focus, retry from the start.
        try:
            ContainerWidget.changeFocus(self)
        except YieldFocus:
            try:
                ContainerWidget.changeFocus(self)
            except YieldFocus:
                pass
    def keystrokeReceived(self, keyID, modifier):
        # At the top level a YieldFocus simply advances focus; it is never
        # propagated further up.
        try:
            ContainerWidget.keystrokeReceived(self, keyID, modifier)
        except YieldFocus:
            self.changeFocus()
class AbsoluteBox(ContainerWidget):
    """
    Container whose children are positioned at fixed (x, y) offsets.

    NOTE(review): this class stores children as (child, x, y) tuples
    (see moveChild/render), but the inherited ContainerWidget.addChild
    appends the bare child -- verify how children are actually added.
    """
    def moveChild(self, child, x, y):
        # Replace the child's stored coordinates in place.
        for n in range(len(self.children)):
            if self.children[n][0] is child:
                self.children[n] = (child, x, y)
                break
        else:
            raise ValueError("No such child", child)
    def render(self, width, height, terminal):
        # Draw each child through a wrapper translated to its position.
        for (ch, x, y) in self.children:
            wrap = BoundedTerminalWrapper(terminal, width - x, height - y, x, y)
            ch.draw(width, height, wrap)
class _Box(ContainerWidget):
    """
    Shared implementation for HBox/VBox: lays children out along one axis
    (the "variable dimension" -- 0 for width, 1 for height), splitting any
    leftover space evenly among children with no size hint.
    """
    TOP, CENTER, BOTTOM = range(3)
    def __init__(self, gravity=CENTER):
        ContainerWidget.__init__(self)
        # NOTE(review): gravity is stored but never read in this class.
        self.gravity = gravity
    def sizeHint(self):
        # Sum hints along the variable dimension, take the max along the
        # other; a child with no hint makes that dimension unknown (None).
        height = 0
        width = 0
        for ch in self.children:
            hint = ch.sizeHint()
            if hint is None:
                hint = (None, None)
            if self.variableDimension == 0:
                if hint[0] is None:
                    width = None
                elif width is not None:
                    width += hint[0]
                if hint[1] is None:
                    height = None
                elif height is not None:
                    height = max(height, hint[1])
            else:
                if hint[0] is None:
                    width = None
                elif width is not None:
                    width = max(width, hint[0])
                if hint[1] is None:
                    height = None
                elif height is not None:
                    height += hint[1]
        return width, height
    def render(self, width, height, terminal):
        if not self.children:
            return
        # First pass: collect per-child wants; count "greedy" children
        # (those with no hint for the variable dimension).
        greedy = 0
        wants = []
        for ch in self.children:
            hint = ch.sizeHint()
            if hint is None:
                hint = (None, None)
            if hint[self.variableDimension] is None:
                greedy += 1
            wants.append(hint[self.variableDimension])
        length = (width, height)[self.variableDimension]
        totalWant = sum([w for w in wants if w is not None])
        if greedy:
            # Leftover space is divided evenly among greedy children.
            leftForGreedy = int((length - totalWant) / greedy)
        # Second pass: draw each child in its slot, advancing the offset.
        widthOffset = heightOffset = 0
        for want, ch in zip(wants, self.children):
            if want is None:
                want = leftForGreedy
            subWidth, subHeight = width, height
            if self.variableDimension == 0:
                subWidth = want
            else:
                subHeight = want
            wrap = BoundedTerminalWrapper(
                terminal,
                subWidth,
                subHeight,
                widthOffset,
                heightOffset,
                )
            ch.draw(subWidth, subHeight, wrap)
            if self.variableDimension == 0:
                widthOffset += want
            else:
                heightOffset += want
class HBox(_Box):
    # Horizontal layout: width (dimension 0) is distributed among children.
    variableDimension = 0
class VBox(_Box):
    # Vertical layout: height (dimension 1) is distributed among children.
    variableDimension = 1
class Packer(ContainerWidget):
    """
    Arrange children in a roughly-square grid: ~sqrt(n) columns of VBoxes
    inside one HBox, filled round-robin.
    """
    def render(self, width, height, terminal):
        if not self.children:
            return
        # Number of columns = sqrt(n) rounded to nearest integer.
        root = int(len(self.children) ** 0.5 + 0.5)
        boxes = [VBox() for n in range(root)]
        for n, ch in enumerate(self.children):
            boxes[n % len(boxes)].addChild(ch)
        h = HBox()
        # Relies on Python 2's eager map(); under Python 3 this line would
        # build a lazy iterator and add nothing.
        map(h.addChild, boxes)
        h.render(width, height, terminal)
class Canvas(Widget):
    """
    A free-form drawing surface: a width x height character buffer addressed
    with canvas[x, y]. Python 2 only (tuple parameter unpacking, 'c' array
    typecode).
    """
    focused = False
    # Flat array of characters, row-major; None until the first resize().
    contents = None
    def __init__(self):
        Widget.__init__(self)
        self.resize(1, 1)
    def resize(self, width, height):
        # Allocate a new blank buffer and copy over the overlapping region
        # of the old contents.
        contents = array.array('c', ' ' * width * height)
        if self.contents is not None:
            for x in range(min(width, self._width)):
                for y in range(min(height, self._height)):
                    contents[width * y + x] = self[x, y]
        self.contents = contents
        self._width = width
        self._height = height
        # Keep the cursor inside the new bounds.
        # NOTE(review): self.x / self.y are never initialized in this class;
        # confirm a subclass or caller sets them before resize() runs twice.
        if self.x >= width:
            self.x = width - 1
        if self.y >= height:
            self.y = height - 1
    def __getitem__(self, (x, y)):
        return self.contents[(self._width * y) + x]
    def __setitem__(self, (x, y), value):
        self.contents[(self._width * y) + x] = value
    def clear(self):
        self.contents = array.array('c', ' ' * len(self.contents))
    def render(self, width, height, terminal):
        if not width or not height:
            return
        if width != self._width or height != self._height:
            self.resize(width, height)
        # Emit the buffer one row at a time.
        for i in range(height):
            terminal.cursorPosition(0, i)
            terminal.write(''.join(self.contents[self._width * i:self._width * i + self._width])[:width])
def horizontalLine(terminal, y, left, right):
    # Draw a horizontal line on row y from column left to right using the
    # VT100 line-drawing character set (0161 octal = horizontal bar glyph).
    terminal.selectCharacterSet(insults.CS_DRAWING, insults.G0)
    terminal.cursorPosition(left, y)
    terminal.write(chr(0161) * (right - left))
    terminal.selectCharacterSet(insults.CS_US, insults.G0)
def verticalLine(terminal, x, top, bottom):
    # Draw a vertical line in column x from row top to bottom using the
    # VT100 line-drawing character set (0170 octal = vertical bar glyph).
    terminal.selectCharacterSet(insults.CS_DRAWING, insults.G0)
    for n in xrange(top, bottom):
        terminal.cursorPosition(x, n)
        terminal.write(chr(0170))
    terminal.selectCharacterSet(insults.CS_US, insults.G0)
def rectangle(terminal, (top, left), (width, height)):
    # Draw a box outline with VT100 line-drawing glyphs: corners (0154,
    # 0153, 0155, 0152), horizontal bars (0161) and vertical bars (0170).
    # NOTE(review): the coordinate order looks inconsistent -- the first
    # cursorPosition gets (top, left) while the side rows use
    # (left, top + n + 1), and the bottom row hard-codes column 0; verify
    # which argument order cursorPosition(x, y) actually expects here.
    terminal.selectCharacterSet(insults.CS_DRAWING, insults.G0)
    terminal.cursorPosition(top, left)
    terminal.write(chr(0154))
    terminal.write(chr(0161) * (width - 2))
    terminal.write(chr(0153))
    for n in range(height - 2):
        terminal.cursorPosition(left, top + n + 1)
        terminal.write(chr(0170))
        terminal.cursorForward(width - 2)
        terminal.write(chr(0170))
    terminal.cursorPosition(0, top + height - 1)
    terminal.write(chr(0155))
    terminal.write(chr(0161) * (width - 2))
    terminal.write(chr(0152))
    terminal.selectCharacterSet(insults.CS_US, insults.G0)
class Border(Widget):
    """
    Wrap another widget and draw a one-cell rectangle border around it,
    delegating focus and keystrokes to the contained widget.
    """
    def __init__(self, containee):
        Widget.__init__(self)
        self.containee = containee
        self.containee.parent = self
    def focusReceived(self):
        return self.containee.focusReceived()
    def focusLost(self):
        return self.containee.focusLost()
    def keystrokeReceived(self, keyID, modifier):
        return self.containee.keystrokeReceived(keyID, modifier)
    def sizeHint(self):
        # The border adds 2 cells in each dimension to the containee's hint.
        hint = self.containee.sizeHint()
        if hint is None:
            hint = (None, None)
        if hint[0] is None:
            x = None
        else:
            x = hint[0] + 2
        if hint[1] is None:
            y = None
        else:
            y = hint[1] + 2
        return x, y
    def filthy(self):
        self.containee.filthy()
        Widget.filthy(self)
    def render(self, width, height, terminal):
        # Red ('\x1b[31m') border while the containee is focused.
        if self.containee.focused:
            terminal.write('\x1b[31m')
        rectangle(terminal, (0, 0), (width, height))
        terminal.write('\x1b[0m')
        # Draw the containee inset by one cell on every side.
        wrap = BoundedTerminalWrapper(terminal, width - 2, height - 2, 1, 1)
        self.containee.draw(width - 2, height - 2, wrap)
class Button(Widget):
    """
    A labeled push button; invokes onPress when Enter is received while
    focused. The label is rendered bold while focused.
    """
    def __init__(self, label, onPress):
        Widget.__init__(self)
        self.label = label
        self.onPress = onPress
    def sizeHint(self):
        return len(self.label), 1
    def characterReceived(self, keyID, modifier):
        # '\r' is the Enter key as delivered by the terminal.
        if keyID == '\r':
            self.onPress()
    def render(self, width, height, terminal):
        terminal.cursorPosition(0, 0)
        if self.focused:
            # Bold on, label, all attributes off.
            terminal.write('\x1b[1m' + self.label + '\x1b[0m')
        else:
            terminal.write(self.label)
class TextInput(Widget):
    """
    A single-line text entry field of at most maxwidth characters.
    Calls onSubmit(buffer) when Enter is received.
    """
    def __init__(self, maxwidth, onSubmit):
        Widget.__init__(self)
        self.onSubmit = onSubmit
        self.maxwidth = maxwidth
        self.buffer = ''   # current text
        self.cursor = 0    # insertion point index into buffer
    def setText(self, text):
        # Replace the contents (truncated to maxwidth) and move the cursor
        # to the end.
        self.buffer = text[:self.maxwidth]
        self.cursor = len(self.buffer)
        self.repaint()
    def func_LEFT_ARROW(self, modifier):
        if self.cursor > 0:
            self.cursor -= 1
            self.repaint()
    def func_RIGHT_ARROW(self, modifier):
        if self.cursor < len(self.buffer):
            self.cursor += 1
            self.repaint()
    def backspaceReceived(self):
        # Delete the character before the cursor, if any.
        if self.cursor > 0:
            self.buffer = self.buffer[:self.cursor - 1] + self.buffer[self.cursor:]
            self.cursor -= 1
            self.repaint()
    def characterReceived(self, keyID, modifier):
        if keyID == '\r':
            self.onSubmit(self.buffer)
        else:
            # Insert at the cursor, respecting the maximum width.
            if len(self.buffer) < self.maxwidth:
                self.buffer = self.buffer[:self.cursor] + keyID + self.buffer[self.cursor:]
                self.cursor += 1
            self.repaint()
    def sizeHint(self):
        # One extra column so the cursor can sit past the last character.
        return self.maxwidth + 1, 1
    def render(self, width, height, terminal):
        currentText = self._renderText()
        terminal.cursorPosition(0, 0)
        if self.focused:
            # Draw text around a reverse-video pseudo-cursor, then pad.
            terminal.write(currentText[:self.cursor])
            cursor(terminal, currentText[self.cursor:self.cursor+1] or ' ')
            terminal.write(currentText[self.cursor+1:])
            terminal.write(' ' * (self.maxwidth - len(currentText) + 1))
        else:
            # Unfocused: show remaining capacity as underscores.
            more = self.maxwidth - len(currentText)
            terminal.write(currentText + '_' * more)
    def _renderText(self):
        # Hook for subclasses to transform the displayed text (see
        # PasswordInput).
        return self.buffer
class PasswordInput(TextInput):
    # A TextInput that masks its contents with asterisks when displayed.
    def _renderText(self):
        return '*' * len(self.buffer)
class TextOutput(Widget):
    """
    A one-line, read-only text display. Never accepts focus.
    """
    text = ''
    def __init__(self, size=None):
        Widget.__init__(self)
        # Optional (width, height) to report from sizeHint().
        self.size = size
    def sizeHint(self):
        return self.size
    def render(self, width, height, terminal):
        terminal.cursorPosition(0, 0)
        # Truncate to the available width and pad with spaces to clear any
        # previous, longer content.
        text = self.text[:width]
        terminal.write(text + ' ' * (width - len(text)))
    def setText(self, text):
        self.text = text
        self.repaint()
    def focusReceived(self):
        # Output-only: always decline focus.
        raise YieldFocus()
class TextOutputArea(TextOutput):
    """
    Multi-line read-only text display. Long lines are either word-wrapped
    (WRAP) or truncated at the widget width (TRUNCATE).
    """
    WRAP, TRUNCATE = range(2)
    def __init__(self, size=None, longLines=WRAP):
        TextOutput.__init__(self, size)
        self.longLines = longLines
    def render(self, width, height, terminal):
        n = 0
        inputLines = self.text.splitlines()
        outputLines = []
        # Expand input lines into display lines until the area is full.
        while inputLines:
            if self.longLines == self.WRAP:
                wrappedLines = tptext.greedyWrap(inputLines.pop(0), width)
                # An empty source line still occupies one display line.
                outputLines.extend(wrappedLines or [''])
            else:
                outputLines.append(inputLines.pop(0)[:width])
            if len(outputLines) >= height:
                break
        for n, L in enumerate(outputLines[:height]):
            terminal.cursorPosition(0, n)
            terminal.write(L)
class Viewport(Widget):
    """
    Show a scrollable window onto a larger widget. The containee is rendered
    into an off-screen TerminalBuffer of fixed size (_width x _height) and
    the (xOffset, yOffset) sub-region is copied to the real terminal.
    """
    _xOffset = 0
    _yOffset = 0
    # Old-style property definitions (pre-decorator idiom): the temporary
    # function returns (getter, setter) and is immediately consumed by
    # property(); setting either offset triggers a repaint on change.
    def xOffset():
        def get(self):
            return self._xOffset
        def set(self, value):
            if self._xOffset != value:
                self._xOffset = value
                self.repaint()
        return get, set
    xOffset = property(*xOffset())
    def yOffset():
        def get(self):
            return self._yOffset
        def set(self, value):
            if self._yOffset != value:
                self._yOffset = value
                self.repaint()
        return get, set
    yOffset = property(*yOffset())
    # Fixed size of the off-screen buffer the containee draws into.
    _width = 160
    _height = 24
    def __init__(self, containee):
        Widget.__init__(self)
        self.containee = containee
        self.containee.parent = self
        self._buf = helper.TerminalBuffer()
        self._buf.width = self._width
        self._buf.height = self._height
        self._buf.connectionMade()
    def filthy(self):
        self.containee.filthy()
        Widget.filthy(self)
    def render(self, width, height, terminal):
        # Render the containee at full virtual size, then blit the visible
        # sub-rectangle character by character.
        self.containee.draw(self._width, self._height, self._buf)
        # XXX /Lame/
        for y, line in enumerate(self._buf.lines[self._yOffset:self._yOffset + height]):
            terminal.cursorPosition(0, y)
            n = 0
            for n, (ch, attr) in enumerate(line[self._xOffset:self._xOffset + width]):
                if ch is self._buf.void:
                    ch = ' '
                terminal.write(ch)
            # Pad the remainder of the row with spaces.
            if n < width:
                terminal.write(' ' * (width - n - 1))
class _Scrollbar(Widget):
    """
    Shared scrollbar logic. onScroll(direction) is called with -1 or +1 and
    must return the new scroll position as a fraction; the result is clamped
    to [0.0, 1.0] and stored in self.percent for rendering.
    """
    def __init__(self, onScroll):
        Widget.__init__(self)
        self.onScroll = onScroll
        self.percent = 0.0
    def smaller(self):
        self.percent = min(1.0, max(0.0, self.onScroll(-1)))
        self.repaint()
    def bigger(self):
        self.percent = min(1.0, max(0.0, self.onScroll(+1)))
        self.repaint()
class HorizontalScrollbar(_Scrollbar):
    """
    One-row horizontal scrollbar: left/right arrow glyphs at the ends, a
    shaded track, and a dark slider positioned by self.percent.
    """
    def sizeHint(self):
        return (None, 1)
    def func_LEFT_ARROW(self, modifier):
        self.smaller()
    def func_RIGHT_ARROW(self, modifier):
        self.bigger()
    _left = u'\N{BLACK LEFT-POINTING TRIANGLE}'
    _right = u'\N{BLACK RIGHT-POINTING TRIANGLE}'
    _bar = u'\N{LIGHT SHADE}'
    _slider = u'\N{DARK SHADE}'
    def render(self, width, height, terminal):
        terminal.cursorPosition(0, 0)
        # Track length excludes the two arrows and the slider itself.
        n = width - 3
        before = int(n * self.percent)
        after = n - before
        me = self._left + (self._bar * before) + self._slider + (self._bar * after) + self._right
        terminal.write(me.encode('utf-8'))
class VerticalScrollbar(_Scrollbar):
    """
    One-column vertical scrollbar: up/down arrow glyphs at the ends, a
    shaded track, and a dark slider positioned by self.percent.
    """
    def sizeHint(self):
        return (1, None)
    def func_UP_ARROW(self, modifier):
        self.smaller()
    def func_DOWN_ARROW(self, modifier):
        self.bigger()
    _up = u'\N{BLACK UP-POINTING TRIANGLE}'
    _down = u'\N{BLACK DOWN-POINTING TRIANGLE}'
    _bar = u'\N{LIGHT SHADE}'
    _slider = u'\N{DARK SHADE}'
    def render(self, width, height, terminal):
        terminal.cursorPosition(0, 0)
        # Row index of the slider within the track (excluding both arrows).
        knob = int(self.percent * (height - 2))
        terminal.write(self._up.encode('utf-8'))
        for i in xrange(1, height - 1):
            terminal.cursorPosition(0, i)
            if i != (knob + 1):
                terminal.write(self._bar.encode('utf-8'))
            else:
                terminal.write(self._slider.encode('utf-8'))
        terminal.cursorPosition(0, height - 1)
        terminal.write(self._down.encode('utf-8'))
class ScrolledArea(Widget):
    """
    Combine a Viewport on a containee widget with a vertical and a
    horizontal scrollbar and a drawn line border around the visible region.
    Arrow keys scroll the viewport via the scrollbars.
    """
    def __init__(self, containee):
        # BUG FIX: the original called Widget.__init__(self, containee).
        # Widget defines no __init__ of its own, so the extra argument was
        # forwarded to object.__init__ -- a TypeError on modern Pythons.
        # The containee is owned by the Viewport, not by Widget.
        Widget.__init__(self)
        self._viewport = Viewport(containee)
        self._horiz = HorizontalScrollbar(self._horizScroll)
        self._vert = VerticalScrollbar(self._vertScroll)
        for w in self._viewport, self._horiz, self._vert:
            w.parent = self
    def _horizScroll(self, n):
        # Shift the viewport, clamp at the left edge, and report the new
        # position as a fraction (NOTE: 25.0 is an arbitrary full-scale
        # constant inherited from the original implementation).
        self._viewport.xOffset += n
        self._viewport.xOffset = max(0, self._viewport.xOffset)
        return self._viewport.xOffset / 25.0
    def _vertScroll(self, n):
        # Same as _horizScroll, for the vertical axis.
        self._viewport.yOffset += n
        self._viewport.yOffset = max(0, self._viewport.yOffset)
        return self._viewport.yOffset / 25.0
    def func_UP_ARROW(self, modifier):
        self._vert.smaller()
    def func_DOWN_ARROW(self, modifier):
        self._vert.bigger()
    def func_LEFT_ARROW(self, modifier):
        self._horiz.smaller()
    def func_RIGHT_ARROW(self, modifier):
        self._horiz.bigger()
    def filthy(self):
        # Propagate the full-redraw mark to all three sub-widgets.
        self._viewport.filthy()
        self._horiz.filthy()
        self._vert.filthy()
        Widget.filthy(self)
    def render(self, width, height, terminal):
        # Containee area is inset by one cell for the border/scrollbars.
        wrapper = BoundedTerminalWrapper(terminal, width - 2, height - 2, 1, 1)
        self._viewport.draw(width - 2, height - 2, wrapper)
        # Red border while focused, matching Border's behavior.
        if self.focused:
            terminal.write('\x1b[31m')
        horizontalLine(terminal, 0, 1, width - 1)
        verticalLine(terminal, 0, 1, height - 1)
        self._vert.draw(1, height - 1, BoundedTerminalWrapper(terminal, 1, height - 1, width - 1, 0))
        self._horiz.draw(width, 1, BoundedTerminalWrapper(terminal, width, 1, 0, height - 1))
        terminal.write('\x1b[0m')
def cursor(terminal, ch):
    # Simulate a text cursor: draw ch in reverse video at the current
    # position, restore the saved attributes, then advance one cell.
    terminal.saveCursor()
    terminal.selectGraphicRendition(str(insults.REVERSE_VIDEO))
    terminal.write(ch)
    terminal.restoreCursor()
    terminal.cursorForward()
class Selection(Widget):
    """
    A scrollable list of strings; the focused row is highlighted in reverse
    video and Enter passes the selected element to onSelect.
    """
    # Index into the sequence
    focusedIndex = 0
    # Offset into the displayed subset of the sequence
    renderOffset = 0
    def __init__(self, sequence, onSelect, minVisible=None):
        Widget.__init__(self)
        self.sequence = sequence
        self.onSelect = onSelect
        self.minVisible = minVisible
        if minVisible is not None:
            # Widest element determines the hinted width.
            self._width = max(map(len, self.sequence))
    def sizeHint(self):
        # Returns None implicitly when no minVisible was given.
        if self.minVisible is not None:
            return self._width, self.minVisible
    def func_UP_ARROW(self, modifier):
        if self.focusedIndex > 0:
            self.focusedIndex -= 1
            if self.renderOffset > 0:
                self.renderOffset -= 1
            self.repaint()
    def func_PGUP(self, modifier):
        # First PGUP jumps to the top of the visible page; subsequent ones
        # page upward by a full widget height.
        if self.renderOffset != 0:
            self.focusedIndex -= self.renderOffset
            self.renderOffset = 0
        else:
            self.focusedIndex = max(0, self.focusedIndex - self.height)
        self.repaint()
    def func_DOWN_ARROW(self, modifier):
        if self.focusedIndex < len(self.sequence) - 1:
            self.focusedIndex += 1
            if self.renderOffset < self.height - 1:
                self.renderOffset += 1
            self.repaint()
    def func_PGDN(self, modifier):
        # Mirror of func_PGUP: jump to the bottom of the page, then page
        # down by a full height, clamped to the end of the sequence.
        if self.renderOffset != self.height - 1:
            change = self.height - self.renderOffset - 1
            if change + self.focusedIndex >= len(self.sequence):
                change = len(self.sequence) - self.focusedIndex - 1
            self.focusedIndex += change
            self.renderOffset = self.height - 1
        else:
            self.focusedIndex = min(len(self.sequence) - 1, self.focusedIndex + self.height)
        self.repaint()
    def characterReceived(self, keyID, modifier):
        if keyID == '\r':
            self.onSelect(self.sequence[self.focusedIndex])
    def render(self, width, height, terminal):
        self.height = height
        # Window of the sequence currently on screen; clamp so the last
        # page is always full when possible.
        start = self.focusedIndex - self.renderOffset
        if start > len(self.sequence) - height:
            start = max(0, len(self.sequence) - height)
        elements = self.sequence[start:start+height]
        for n, ele in enumerate(elements):
            terminal.cursorPosition(0, n)
            if n == self.renderOffset:
                # Highlight the focused row (bold too when widget-focused).
                terminal.saveCursor()
                if self.focused:
                    modes = str(insults.REVERSE_VIDEO), str(insults.BOLD)
                else:
                    modes = str(insults.REVERSE_VIDEO),
                terminal.selectGraphicRendition(*modes)
            text = ele[:width]
            terminal.write(text + (' ' * (width - len(text))))
            if n == self.renderOffset:
                terminal.restoreCursor()
| gpl-2.0 |
shadow-identity/django-inplaceedit | inplaceeditform/commons.py | 17 | 5629 | # Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this programe. If not, see <http://www.gnu.org/licenses/>.
from django import template
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django.conf import settings
from inplaceeditform import settings as inplace_settings
from inplaceeditform.adaptors import ADAPTOR_INPLACEEDIT as DEFAULT_ADAPTOR_INPLACEEDIT
# Optional integration with the 'transmeta' model-translation package:
# has_transmeta gates the translated-field-name lookups further below.
has_transmeta = False
# NOTE(review): DEFAULT_VALUE appears unused in this module chunk --
# presumably a fallback display value consumed elsewhere; confirm.
DEFAULT_VALUE = ''
try:
    import transmeta
    has_transmeta = True
except ImportError:
    pass
def get_dict_from_obj(obj):
    '''
    Return a plain dict of obj's field values.

    Foreign keys are exposed under the field name (``foo``) instead of the
    raw attribute name (``foo_id``), and many-to-many fields are added as
    lists of related-object ids. The field lookup is wrapped in try/except
    so this also works when the object comes from a
    GenericRelatedObjectManager (its __dict__ may carry extra ``*_id`` keys
    that are not model fields).
    '''
    obj_dict = obj.__dict__
    obj_dict_result = obj_dict.copy()
    for key, value in obj_dict.items():
        if key.endswith('_id'):
            # BUG FIX: strip only the trailing '_id'. The original used
            # key.replace('_id', ''), which removed *every* occurrence and
            # mangled names such as 'my_id_field_id'.
            key2 = key[:-len('_id')]
            try:
                field, model, direct, m2m = obj._meta.get_field_by_name(key2)
                if isinstance(field, ForeignKey):
                    # Re-key the raw FK attribute under the field name.
                    obj_dict_result[key2] = obj_dict_result[key]
                    del obj_dict_result[key]
            except FieldDoesNotExist:
                pass
    manytomany_list = obj._meta.many_to_many
    for manytomany in manytomany_list:
        ids = [obj_rel.id for obj_rel in manytomany.value_from_object(obj).select_related()]
        if ids:
            obj_dict_result[manytomany.name] = ids
    return obj_dict_result
def apply_filters(value, filters, load_tags=None):
    """
    Run ``value`` through a chain of Django template filters.

    ``filters`` is a sequence of filter expressions (e.g. ['lower',
    'truncatewords:3']); ``load_tags`` optionally names template tag
    libraries to {% load %} first. With no filters, ``value`` is returned
    unchanged (and the template machinery is never touched).
    """
    if not filters:
        return value
    pipeline = '|%s' % '|'.join(filters)
    tags = load_tags or []
    prefix = "{%% load %s %%}" % ' '.join(tags) if tags else ""
    tmpl = template.Template("%s{{ value%s }}" % (prefix, pipeline))
    return tmpl.render(template.Context({'value': value}))
def import_module(name, package=None):
    """
    Import ``name`` and return the module object.

    Prefers Django's (old-style) ``django.utils.importlib.import_module``
    when available; otherwise falls back to the builtin ``__import__``,
    passing the final dotted component as the fromlist so the leaf module
    (rather than the top-level package) is returned.
    """
    try:
        from django.utils.importlib import import_module as _import_module
        return _import_module(name, package)
    except ImportError:
        leaf = name.rsplit('.', 1)[-1]
        return __import__(name, {}, {}, leaf)
def get_adaptor_class(adaptor=None, obj=None, field_name=None):
    """Resolve the adaptor class used to inline-edit ``obj.field_name``.

    When *adaptor* is not given it is guessed from the model field type.
    The adaptor name is then mapped to a dotted class path, looking first
    at the project settings and then at the package defaults; an unknown
    explicit name retries with type-guessing, and ultimately falls back
    to ``BaseAdaptorField``.
    """
    if not adaptor:
        try:
            field = obj._meta.get_field_by_name(field_name)[0]
        except FieldDoesNotExist:
            if not has_transmeta:
                # No transmeta fallback available: propagate the original
                # error instead of continuing with ``field`` unbound (which
                # previously surfaced as a confusing NameError below).
                raise
            # transmeta stores translations in per-language real fields.
            field = obj._meta.get_field_by_name(transmeta.get_real_fieldname(field_name))[0]
        # Order matters: URLField/EmailField subclass CharField and must be
        # tested before it; same for NullBooleanField vs BooleanField.
        if isinstance(field, models.URLField):
            adaptor = 'url'
        elif isinstance(field, models.EmailField):
            adaptor = 'email'
        elif isinstance(field, models.CharField):
            adaptor = 'text'
            if getattr(field, 'choices', None):
                adaptor = 'choices'
        elif isinstance(field, models.TextField):
            adaptor = 'textarea'
        elif isinstance(field, models.NullBooleanField):
            adaptor = 'nullboolean'
        elif isinstance(field, models.BooleanField):
            adaptor = 'boolean'
        elif isinstance(field, models.DateTimeField):
            adaptor = 'datetime'
        elif isinstance(field, models.DateField):
            adaptor = 'date'
        elif isinstance(field, models.TimeField):
            adaptor = 'time'
        elif isinstance(field, models.IntegerField):
            adaptor = 'integer'
        elif isinstance(field, models.FloatField):
            adaptor = 'float'
        elif isinstance(field, models.DecimalField):
            adaptor = 'decimal'
        elif isinstance(field, ForeignKey):
            adaptor = 'fk'
        elif isinstance(field, ManyToManyField):
            adaptor = 'm2mcomma'
        elif isinstance(field, models.ImageField):
            adaptor = 'image'
        elif isinstance(field, models.FileField):
            adaptor = 'file'
    # Imported lazily to avoid a circular import with inplaceeditform.fields.
    from inplaceeditform.fields import BaseAdaptorField
    path_adaptor = adaptor and (inplace_settings.ADAPTOR_INPLACEEDIT.get(adaptor, None) or
                                DEFAULT_ADAPTOR_INPLACEEDIT.get(adaptor, None))
    if not path_adaptor and adaptor:
        # Unknown explicit adaptor name: retry, guessing from the field type.
        return get_adaptor_class(obj=obj, field_name=field_name)
    elif not path_adaptor:
        return BaseAdaptorField
    path_module, class_adaptor = ('.'.join(path_adaptor.split('.')[:-1]), path_adaptor.split('.')[-1])
    return getattr(import_module(path_module), class_adaptor)
def get_static_url(subfix='inplaceeditform'):
    """Return the static files base URL.

    Uses ``STATIC_URL`` when the project defines it; otherwise (old Django
    versions) builds the URL from ``MEDIA_URL`` plus *subfix*.
    """
    static_url = getattr(settings, 'STATIC_URL', None)
    if not static_url:
        # Old Django versions only define MEDIA_URL.
        static_url = '%s%s/' % (getattr(settings, 'MEDIA_URL', None), subfix)
    return static_url
def get_admin_static_url():
    """Return the base URL for the admin static files.

    Prefers the legacy ``ADMIN_MEDIA_PREFIX`` setting when present and
    otherwise appends ``admin/`` to the regular static URL.
    """
    fallback = get_static_url() + "admin/"
    return getattr(settings, 'ADMIN_MEDIA_PREFIX', fallback)
| lgpl-3.0 |
plamut/superdesk | server/apps/highlights/generate.py | 1 | 3166 |
import superdesk
from bs4 import BeautifulSoup
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
from flask import render_template
from jinja2 import Template
def getTemplate(highlightId):
    """Return the body template string configured for *highlightId*.

    Returns ``None`` when the id is falsy, the highlight does not exist,
    the highlight has no template configured, or the referenced content
    template lacks a ``body_html``.
    """
    if not highlightId:
        return None
    highlight = superdesk.get_resource_service('highlights').find_one(
        req=None, _id=highlightId)
    if not highlight or not highlight.get('template'):
        return None
    content_template = superdesk.get_resource_service('content_templates').find_one(
        req=None, _id=highlight.get('template'))
    if not content_template or 'body_html' not in content_template:
        return None
    return content_template.get('body_html')
class GenerateHighlightsService(superdesk.Service):
    def create(self, docs, **kwargs):
        """Generate highlights text item for given package.

        If doc.preview is True it won't save the item, only return.
        """
        service = superdesk.get_resource_service('archive')
        for doc in docs:
            preview = doc.get('preview', False)
            # Resolve the highlights package this item is generated from.
            package = service.find_one(req=None, _id=doc['package'])
            if not package:
                superdesk.abort(404)
            stringTemplate = getTemplate(package.get('highlight'))
            # Reuse the incoming doc as the generated text item: wipe the
            # request payload and copy the package metadata over.
            doc.clear()
            doc[ITEM_TYPE] = CONTENT_TYPE.TEXT
            doc['headline'] = package.get('headline')
            doc['slugline'] = package.get('slugline')
            doc['byline'] = package.get('byline')
            doc['task'] = package.get('task')
            doc['family_id'] = package.get('guid')
            items = []
            # Collect the referenced archive items from every package group.
            for group in package.get('groups', []):
                for ref in group.get('refs', []):
                    if 'residRef' in ref:
                        item = service.find_one(req=None, _id=ref.get('residRef'))
                        if item:
                            html = item.get('body_html')
                            if html:
                                soup = BeautifulSoup(html, "html.parser")
                                # Expose the story's first <p> to templates.
                                item['first_paragraph_body_html'] = str(soup.p)
                            items.append(item)
            if stringTemplate:
                # The highlight has its own Jinja template configured.
                template = Template(stringTemplate)
                doc['body_html'] = template.render(package=package, items=items)
            else:
                doc['body_html'] = render_template('default_highlight_template.txt', package=package, items=items)
        # NOTE(review): ``preview`` holds the value from the last doc of the
        # loop -- this assumes a single-doc request; confirm with callers.
        if preview:
            return ['' for doc in docs]
        else:
            return service.post(docs, **kwargs)
class GenerateHighlightsResource(superdesk.Resource):
    """Generate highlights item for given package."""

    schema = {
        # _id of the archived highlights package to summarise.
        'package': {
            # not setting relation here, we will fetch it anyhow
            'type': 'string',
            'required': True,
        },
        # When True the generated item is returned but not saved.
        'preview': {
            'type': 'boolean',
            'default': False,
        }
    }

    resource_methods = ['POST']
    item_methods = []
    # Only users with the 'highlights' privilege may generate items.
    privileges = {'POST': 'highlights'}
| agpl-3.0 |
servioticy/servioticy-demo | cellphones-wp2demo/brain/venv/lib/python2.7/site-packages/pip/commands/list.py | 269 | 7251 | from __future__ import absolute_import
import logging
from pip._vendor import pkg_resources
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.wheel import WheelCache
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.

    Packages are listed in a case-insensitive sorted order.
    """
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'

    def __init__(self, *args, **kw):
        super(ListCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts

        # Mutually-independent listing modes; ``run`` picks the first set.
        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages (excluding editables)')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages (excluding editables)')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )

        # Shared index options (index URL, find-links, ...) come first.
        index_opts = make_option_group(index_group, self.parser)

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )

    def run(self, options, args):
        """Dispatch to the listing mode selected by the CLI flags."""
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        elif options.editable:
            self.run_editables(options)
        else:
            self.run_listing(options)

    def run_outdated(self, options):
        """Log installed packages that have a newer release on the index."""
        for dist, version, typ in self.find_packages_latest_versions(options):
            if version > dist.parsed_version:
                logger.info(
                    '%s (Current: %s Latest: %s [%s])',
                    dist.project_name, dist.version, version, typ,
                )

    def find_packages_latest_versions(self, options):
        """Yield ``(dist, latest_version, typ)`` for installed packages.

        ``typ`` is 'wheel', 'sdist' or 'unknown' depending on the latest
        release's distribution format.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        # Honour dependency links published by installed distributions.
        dependency_links = []
        for dist in get_installed_distributions(local_only=options.local,
                                                user_only=options.user):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )

        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)

            # Editables are excluded: their "latest version" is meaningless.
            installed_packages = get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                include_editables=False,
            )
            format_control = FormatControl(set(), set())
            wheel_cache = WheelCache(options.cache_dir, format_control)
            for dist in installed_packages:
                req = InstallRequirement.from_line(
                    dist.key, None, isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
                typ = 'unknown'
                try:
                    link = finder.find_requirement(req, True)

                    # If link is None, means installed version is most
                    # up-to-date
                    if link is None:
                        continue
                except DistributionNotFound:
                    continue
                else:
                    canonical_name = pkg_resources.safe_name(req.name).lower()
                    formats = fmt_ctl_formats(format_control, canonical_name)
                    search = Search(
                        req.name,
                        canonical_name,
                        formats)
                    remote_version = finder._link_package_versions(
                        link, search).version
                    if link.is_wheel:
                        typ = 'wheel'
                    else:
                        typ = 'sdist'
                yield dist, remote_version, typ

    def run_listing(self, options):
        """Log every installed package."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
        )
        self.output_package_listing(installed_packages)

    def run_editables(self, options):
        """Log only packages installed in editable (develop) mode."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=True,
        )
        self.output_package_listing(installed_packages)

    def output_package_listing(self, installed_packages):
        """Log one line per distribution, case-insensitively sorted by name."""
        installed_packages = sorted(
            installed_packages,
            key=lambda dist: dist.project_name.lower(),
        )
        for dist in installed_packages:
            if dist_is_editable(dist):
                # Editables also show their source checkout location.
                line = '%s (%s, %s)' % (
                    dist.project_name,
                    dist.version,
                    dist.location,
                )
            else:
                line = '%s (%s)' % (dist.project_name, dist.version)
            logger.info(line)

    def run_uptodate(self, options):
        """Log installed packages whose installed version is the latest."""
        uptodate = []
        for dist, version, typ in self.find_packages_latest_versions(options):
            if dist.parsed_version == version:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
| apache-2.0 |
andaag/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
    """Probability calibration with isotonic regression or sigmoid.

    With this class, the base_estimator is fit on the train set of the
    cross-validation generator and the test set is used for calibration.
    The probabilities for each of the folds are then averaged
    for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
    fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. If cv=prefit, the
        classifier must have been fit already on data.

    method : 'sigmoid' | 'isotonic'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parameteric approach. It is not advised to use isotonic calibration
        with too few calibration samples (<<1000) since it tends to overfit.
        Use sigmoids (Platt's calibration) in this case.

    cv : integer or cross-validation generator or "prefit", optional
        If an integer is passed, it is the number of folds (default 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.
        If "prefit" is passed, it is assumed that base_estimator has been
        fitted already and all data is used for calibration.

    Attributes
    ----------
    classes_ : array, shape (n_classes)
        The class labels.

    calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each crossvalidation fold,
        which has been fitted on all but the validation fold and calibrated
        on the validation fold.

    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001

    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)

    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)

    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
    """
    def __init__(self, base_estimator=None, method='sigmoid', cv=3):
        self.base_estimator = base_estimator
        self.method = method
        self.cv = cv

    def fit(self, X, y, sample_weight=None):
        """Fit the calibrated model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
                         force_all_finite=False)
        X, y = indexable(X, y)
        lb = LabelBinarizer().fit(y)
        self.classes_ = lb.classes_

        # Check that each cross-validation fold can have at least one
        # example per class
        n_folds = self.cv if isinstance(self.cv, int) \
            else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
        if n_folds and \
                np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
            raise ValueError("Requesting %d-fold cross-validation but provided"
                             " less than %d examples for at least one class."
                             % (n_folds, n_folds))

        self.calibrated_classifiers_ = []
        if self.base_estimator is None:
            # we want all classifiers that don't expose a random_state
            # to be deterministic (and we don't want to expose this one).
            base_estimator = LinearSVC(random_state=0)
        else:
            base_estimator = self.base_estimator

        if self.cv == "prefit":
            # base_estimator is already fitted: calibrate on all of (X, y).
            calibrated_classifier = _CalibratedClassifier(
                base_estimator, method=self.method)
            if sample_weight is not None:
                calibrated_classifier.fit(X, y, sample_weight)
            else:
                calibrated_classifier.fit(X, y)
            self.calibrated_classifiers_.append(calibrated_classifier)
        else:
            cv = check_cv(self.cv, X, y, classifier=True)
            # Forward sample_weight to the base estimator only when its
            # ``fit`` signature accepts it; otherwise warn and use the
            # weights solely for the calibration step.
            arg_names = inspect.getargspec(base_estimator.fit)[0]
            estimator_name = type(base_estimator).__name__
            if (sample_weight is not None
                    and "sample_weight" not in arg_names):
                warnings.warn("%s does not support sample_weight. Samples"
                              " weights are only used for the calibration"
                              " itself." % estimator_name)
                base_estimator_sample_weight = None
            else:
                base_estimator_sample_weight = sample_weight
            for train, test in cv:
                # Fit a clone on the train split, calibrate on the test split.
                this_estimator = clone(base_estimator)
                if base_estimator_sample_weight is not None:
                    this_estimator.fit(
                        X[train], y[train],
                        sample_weight=base_estimator_sample_weight[train])
                else:
                    this_estimator.fit(X[train], y[train])

                calibrated_classifier = _CalibratedClassifier(
                    this_estimator, method=self.method)
                if sample_weight is not None:
                    calibrated_classifier.fit(X[test], y[test],
                                              sample_weight[test])
                else:
                    calibrated_classifier.fit(X[test], y[test])
                self.calibrated_classifiers_.append(calibrated_classifier)

        return self

    def predict_proba(self, X):
        """Posterior probabilities of classification

        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas.
        """
        check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
        X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
                        force_all_finite=False)
        # Compute the arithmetic mean of the predictions of the calibrated
        # classfiers
        mean_proba = np.zeros((X.shape[0], len(self.classes_)))
        for calibrated_classifier in self.calibrated_classifiers_:
            proba = calibrated_classifier.predict_proba(X)
            mean_proba += proba

        mean_proba /= len(self.calibrated_classifiers_)

        return mean_proba

    def predict(self, X):
        """Predict the target of new samples. Can be different from the
        prediction of the uncalibrated classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples,)
            The predicted class.
        """
        check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
        # Class with the highest mean calibrated probability wins.
        return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
    """Probability calibration with isotonic regression or sigmoid.

    It assumes that base_estimator has already been fit, and trains the
    calibration on the input set of the fit function. Note that this class
    should not be used as an estimator directly. Use CalibratedClassifierCV
    with cv="prefit" instead.

    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. No default value since
        it has to be an already fitted estimator.

    method : 'sigmoid' | 'isotonic'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parameteric approach based on isotonic regression.

    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001

    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)

    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)

    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
    """
    def __init__(self, base_estimator, method='sigmoid'):
        self.base_estimator = base_estimator
        self.method = method

    def _preproc(self, X):
        # Turn the base estimator's output into a 2-D score matrix,
        # preferring decision_function over predict_proba.
        n_classes = len(self.classes_)
        if hasattr(self.base_estimator, "decision_function"):
            df = self.base_estimator.decision_function(X)
            if df.ndim == 1:
                df = df[:, np.newaxis]
        elif hasattr(self.base_estimator, "predict_proba"):
            df = self.base_estimator.predict_proba(X)
            if n_classes == 2:
                # Binary case: keep only the positive-class column.
                df = df[:, 1:]
        else:
            raise RuntimeError('classifier has no decision_function or '
                               'predict_proba method.')

        idx_pos_class = np.arange(df.shape[1])

        return df, idx_pos_class

    def fit(self, X, y, sample_weight=None):
        """Calibrate the fitted model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        lb = LabelBinarizer()
        Y = lb.fit_transform(y)
        self.classes_ = lb.classes_

        df, idx_pos_class = self._preproc(X)
        self.calibrators_ = []

        # One calibrator per score column (one-vs-rest per class).
        for k, this_df in zip(idx_pos_class, df.T):
            if self.method == 'isotonic':
                calibrator = IsotonicRegression(out_of_bounds='clip')
            elif self.method == 'sigmoid':
                calibrator = _SigmoidCalibration()
            else:
                raise ValueError('method should be "sigmoid" or '
                                 '"isotonic". Got %s.' % self.method)
            calibrator.fit(this_df, Y[:, k], sample_weight)
            self.calibrators_.append(calibrator)

        return self

    def predict_proba(self, X):
        """Posterior probabilities of classification

        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas. Can be exact zeros.
        """
        n_classes = len(self.classes_)
        proba = np.zeros((X.shape[0], n_classes))

        df, idx_pos_class = self._preproc(X)

        for k, this_df, calibrator in \
                zip(idx_pos_class, df.T, self.calibrators_):
            if n_classes == 2:
                # Binary case: the single calibrated column is the positive
                # class, which lives at index 1 of the output.
                k += 1
            proba[:, k] = calibrator.predict(this_df)

        # Normalize the probabilities
        if n_classes == 2:
            proba[:, 0] = 1. - proba[:, 1]
        else:
            proba /= np.sum(proba, axis=1)[:, np.newaxis]

        # XXX : for some reason all probas can be 0
        proba[np.isnan(proba)] = 1. / n_classes

        # Deal with cases where the predicted probability minimally exceeds 1.0
        proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0

        return proba
def _sigmoid_calibration(df, y, sample_weight=None):
    """Probability Calibration with sigmoid method (Platt 2000)

    Parameters
    ----------
    df : ndarray, shape (n_samples,)
        The decision function or predict proba for the samples.

    y : ndarray, shape (n_samples,)
        The targets.

    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    a : float
        The slope.

    b : float
        The intercept.

    References
    ----------
    Platt, "Probabilistic Outputs for Support Vector Machines"
    """
    scores = column_or_1d(df)
    y = column_or_1d(y)

    tiny = np.finfo(np.float).tiny  # guards the logs against exact zeros

    # Soft target probabilities from Bayesian priors (Platt, end of 2.2).
    n_negative = float(np.sum(y <= 0))
    n_positive = y.shape[0] - n_negative
    target = np.zeros(y.shape)
    target[y > 0] = (n_positive + 1.) / (n_positive + 2.)
    target[y <= 0] = 1. / (n_negative + 2.)
    target_comp = 1. - target

    def objective(theta):
        # Negative log-likelihood of the sigmoid fit (Platt, section 2.2).
        exp_term = np.exp(theta[0] * scores + theta[1])
        prob = 1. / (1. + exp_term)
        losses = -(target * np.log(prob + tiny)
                   + target_comp * np.log(1. - prob + tiny))
        if sample_weight is None:
            return losses.sum()
        return (sample_weight * losses).sum()

    def grad(theta):
        # Analytic gradient of ``objective`` w.r.t. (slope, intercept).
        exp_term = np.exp(theta[0] * scores + theta[1])
        prob = 1. / (1. + exp_term)
        common = prob * (target * exp_term - target_comp)
        if sample_weight is not None:
            common = common * sample_weight
        return np.array([np.dot(common, scores), np.sum(common)])

    theta0 = np.array([0., log((n_negative + 1.) / (n_positive + 1.))])
    theta_opt = fmin_bfgs(objective, theta0, fprime=grad, disp=False)
    return theta_opt[0], theta_opt[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
    """Sigmoid regression model.

    Attributes
    ----------
    a_ : float
        The slope.

    b_ : float
        The intercept.
    """
    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples,)
            Training data.

        y : array-like, shape (n_samples,)
            Training target.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X = column_or_1d(X)
        y = column_or_1d(y)
        X, y = indexable(X, y)

        # Platt scaling: fit the slope a_ and intercept b_ of the sigmoid.
        self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
        return self

    def predict(self, T):
        """Predict new data by applying the fitted sigmoid.

        Parameters
        ----------
        T : array-like, shape (n_samples,)
            Data to predict from.

        Returns
        -------
        T_ : array, shape (n_samples,)
            The predicted data.
        """
        T = column_or_1d(T)
        # Sign convention: large positive a_ * T + b_ maps towards 0.
        return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
    """Compute true and predicted probabilities for a calibration curve.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.

    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.

    normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
        a proper probability. If True, the smallest value in y_prob is mapped
        onto 0 and the largest one onto 1.

    n_bins : int
        Number of bins. A bigger number requires more data.

    Returns
    -------
    prob_true : array, shape (n_bins,)
        The true probability in each bin (fraction of positives).

    prob_pred : array, shape (n_bins,)
        The mean predicted probability in each bin.

    References
    ----------
    Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
    Probabilities With Supervised Learning, in Proceedings of the 22nd
    International Conference on Machine Learning (ICML).
    See section 4 (Qualitative Analysis of Predictions).
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)

    if normalize:
        # Min-max scale the predictions onto [0, 1].
        low, high = y_prob.min(), y_prob.max()
        y_prob = (y_prob - low) / (high - low)
    elif y_prob.min() < 0 or y_prob.max() > 1:
        raise ValueError("y_prob has values outside [0, 1] and normalize is "
                         "set to False.")

    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)

    # The upper edge is nudged past 1.0 so that y_prob == 1.0 falls into
    # the last regular bin instead of an overflow bin.
    edges = np.linspace(0., 1. + 1e-8, n_bins + 1)
    bin_ids = np.digitize(y_prob, edges) - 1

    pred_sums = np.bincount(bin_ids, weights=y_prob, minlength=len(edges))
    true_sums = np.bincount(bin_ids, weights=y_true, minlength=len(edges))
    counts = np.bincount(bin_ids, minlength=len(edges))

    nonempty = counts != 0
    prob_true = true_sums[nonempty] / counts[nonempty]
    prob_pred = pred_sums[nonempty] / counts[nonempty]
    return prob_true, prob_pred
| bsd-3-clause |
drnextgis/QGIS | python/plugins/processing/algs/qgis/Datasources2Vrt.py | 5 | 7861 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2015
Copyright : (C) 2015 by Luigi Pirelli
Email : luipir at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Luigi Pirelli'
__date__ = 'May 2015'
__copyright__ = '(C) 2015, Luigi Pirelli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import codecs
import xml.sax.saxutils
from osgeo import ogr
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputFile
from processing.core.outputs import OutputString
class Datasources2Vrt(GeoAlgorithm):
    """Build a GDAL/OGR virtual vector (VRT) document referencing several
    vector datasources, optionally merged into a single union layer."""

    DATASOURCES = 'DATASOURCES'
    UNIONED = 'UNIONED'
    VRT_FILE = 'VRT_FILE'
    VRT_STRING = 'VRT_STRING'

    # OGR geometry type code -> VRT <GeometryType> element value.
    _GEOM_TYPE_NAMES = {
        ogr.wkbUnknown: 'wkbUnknown',
        ogr.wkbPoint: 'wkbPoint',
        ogr.wkbLineString: 'wkbLineString',
        ogr.wkbPolygon: 'wkbPolygon',
        ogr.wkbMultiPoint: 'wkbMultiPoint',
        ogr.wkbMultiLineString: 'wkbMultiLineString',
        ogr.wkbMultiPolygon: 'wkbMultiPolygon',
        ogr.wkbGeometryCollection: 'wkbGeometryCollection',
        ogr.wkbNone: 'wkbNone',
        ogr.wkbLinearRing: 'wkbLinearRing',
    }

    # OGR field type code -> VRT <Field type="..."> attribute value.
    _FIELD_TYPE_NAMES = {
        ogr.OFTInteger: 'Integer',
        ogr.OFTString: 'String',
        ogr.OFTReal: 'Real',
        ogr.OFTStringList: 'StringList',
        ogr.OFTIntegerList: 'IntegerList',
        ogr.OFTRealList: 'RealList',
        ogr.OFTBinary: 'Binary',
        ogr.OFTDate: 'Date',
        ogr.OFTTime: 'Time',
        ogr.OFTDateTime: 'DateTime',
    }

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, inputs and outputs."""
        self.name, self.i18n_name = self.trAlgorithm('Build virtual vector')
        self.group, self.i18n_group = self.trAlgorithm('Vector general tools')

        self.addParameter(ParameterMultipleInput(self.DATASOURCES,
                                                 self.tr('Input datasources')))
        self.addParameter(ParameterBoolean(self.UNIONED,
                                           self.tr('Create "unioned" VRT'),
                                           default=False))

        self.addOutput(OutputFile(self.VRT_FILE,
                                  self.tr('Virtual vector'), ext='vrt'))
        self.addOutput(OutputString(self.VRT_STRING,
                                    self.tr('Virtual string')))

    def processAlgorithm(self, progress):
        """Build the VRT for the selected datasources and publish both the
        file and the string outputs."""
        input_layers = self.getParameterValue(self.DATASOURCES)
        unioned = self.getParameterValue(self.UNIONED)
        vrtPath = self.getOutputValue(self.VRT_FILE)

        layers = input_layers.split(';')
        vrtString = self.mergeDataSources2Vrt(layers,
                                              vrtPath,
                                              union=unioned,
                                              relative=False,
                                              schema=False,
                                              progress=progress)
        self.setOutputValue(self.VRT_STRING, vrtString)

    def mergeDataSources2Vrt(self, dataSources, outFile, union=False, relative=False,
                             schema=False, progress=None):
        '''Function to do the work of merging datasources in a single vrt format

        @param dataSources: Array of path strings
        @param outFile: the output vrt file to generate, or None to skip writing
        @param union: wrap every layer in a single OGRVRTUnionLayer
        @param relative: Write relative flag. DOES NOT relativise paths. They have to be already relative
        @param schema: Schema flag
        @param progress: optional object exposing setPercentage(int)
        @raise GeoAlgorithmExecutionException: when OGR cannot open a datasource
        @return: vrt in string format
        '''
        vrt = '<OGRVRTDataSource>'
        if union:
            vrt += '<OGRVRTUnionLayer name="UnionedLayer">'

        total = 100.0 / len(dataSources)
        for current, inFile in enumerate(dataSources):
            # ``progress`` defaults to None; only report when one was given
            # (previously this crashed with AttributeError on None).
            if progress is not None:
                progress.setPercentage(int(current * total))

            srcDS = ogr.Open(inFile, 0)
            if srcDS is None:
                raise GeoAlgorithmExecutionException(
                    self.tr('Invalid datasource: {}'.format(inFile)))

            if schema:
                inFile = '@dummy@'

            for layer in srcDS:
                layerDef = layer.GetLayerDefn()
                layerName = layerDef.GetName()

                vrt += '<OGRVRTLayer name="{}">'.format(self.XmlEsc(layerName))
                vrt += '<SrcDataSource relativeToVRT="{}" shared="{}">{}</SrcDataSource>'.format(1 if relative else 0, not schema, self.XmlEsc(inFile))
                if schema:
                    vrt += '<SrcLayer>@dummy@</SrcLayer>'
                else:
                    vrt += '<SrcLayer>{}</SrcLayer>'.format(self.XmlEsc(layerName))
                vrt += '<GeometryType>{}</GeometryType>'.format(self.GeomType2Name(layerDef.GetGeomType()))

                crs = layer.GetSpatialRef()
                if crs is not None:
                    vrt += '<LayerSRS>{}</LayerSRS>'.format(self.XmlEsc(crs.ExportToWkt()))

                # Process all the fields.
                for fieldIdx in range(layerDef.GetFieldCount()):
                    fieldDef = layerDef.GetFieldDefn(fieldIdx)
                    vrt += '<Field name="{}" type="{}"'.format(self.XmlEsc(fieldDef.GetName()), self.fieldType2Name(fieldDef.GetType()))
                    if not schema:
                        vrt += ' src="{}"'.format(self.XmlEsc(fieldDef.GetName()))
                    if fieldDef.GetWidth() > 0:
                        vrt += ' width="{}"'.format(fieldDef.GetWidth())
                    if fieldDef.GetPrecision() > 0:
                        vrt += ' precision="{}"'.format(fieldDef.GetPrecision())
                    vrt += '/>'

                vrt += '</OGRVRTLayer>'

            srcDS.Destroy()

        if union:
            vrt += '</OGRVRTUnionLayer>'
        vrt += '</OGRVRTDataSource>'

        # TODO: pretty-print XML
        if outFile is not None:
            # Explicit encoding: layer names and paths may contain non-ASCII.
            with codecs.open(outFile, 'w', encoding='utf-8') as f:
                f.write(vrt)

        return vrt

    def GeomType2Name(self, geomType):
        """Map an OGR geometry type code to its VRT name; unknown codes fall
        back to 'wkbUnknown'."""
        return self._GEOM_TYPE_NAMES.get(geomType, 'wkbUnknown')

    def fieldType2Name(self, fieldType):
        """Map an OGR field type code to its VRT name; unknown codes fall
        back to 'String'."""
        return self._FIELD_TYPE_NAMES.get(fieldType, 'String')

    def XmlEsc(self, text):
        """Escape &, < and > so *text* can be embedded in the VRT XML."""
        return xml.sax.saxutils.escape(text)
| gpl-2.0 |
lsqtongxin/django | tests/many_to_one/tests.py | 26 | 31504 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, First, Parent, Record, Relation, Reporter,
School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
    """Exercise the forward and reverse sides of a many-to-one (ForeignKey)
    relation: assignment, reverse managers, cross-relation querying,
    cascading deletion and the related-object cache."""

    def setUp(self):
        # Create a few Reporters.
        self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
        self.r.save()
        self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
        self.r2.save()
        # Create an Article.
        self.a = Article(id=None, headline="This is a test",
                         pub_date=datetime.date(2005, 7, 27), reporter=self.r)
        self.a.save()

    def test_get(self):
        """Forward access from an Article to its Reporter."""
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)
        # These are strings instead of unicode strings because that's what was used in
        # the creation of this reporter (and we haven't refreshed the data from the
        # database, which always returns unicode strings).
        self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))

    def test_create(self):
        """Creating an Article via reporter_id instead of a Reporter object."""
        # You can also instantiate an Article by passing the Reporter's ID
        # instead of a Reporter object.
        a3 = Article(id=None, headline="Third article",
                     pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        a3.save()
        self.assertEqual(a3.reporter.id, self.r.id)
        # Similarly, the reporter ID can be a string.
        a4 = Article(id=None, headline="Fourth article",
                     pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        a4.save()
        self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")

    def test_add(self):
        """Adding articles through the reverse article_set manager."""
        # Create an Article via the Reporter object.
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        self.assertEqual(repr(new_article), "<Article: John's second story>")
        self.assertEqual(new_article.reporter.id, self.r.id)
        # Create a new article, and add it to the article set.
        new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
        with self.assertRaisesMessage(ValueError, msg):
            self.r.article_set.add(new_article2)
        self.r.article_set.add(new_article2, bulk=False)
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: Paul's story>",
                                     "<Article: This is a test>",
                                 ])
        # Add the same article to a different article set - check that it moves.
        self.r2.article_set.add(new_article2)
        self.assertEqual(new_article2.reporter.id, self.r2.id)
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Adding an object of the wrong type raises TypeError.
        with transaction.atomic():
            with six.assertRaisesRegex(self, TypeError,
                                       "'Article' instance expected, got <Reporter.*"):
                self.r.article_set.add(self.r2)
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: This is a test>",
                                 ])

    def test_set(self):
        """Replacing the contents of a reverse relation with set()."""
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story",
                                                  pub_date=datetime.date(2006, 1, 17))
        # Assign the article to the reporter.
        new_article2.reporter = self.r
        new_article2.save()
        self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        # Set the article back again.
        self.r2.article_set.set([new_article, new_article2])
        self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
        self.assertQuerysetEqual(self.r2.article_set.all(),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: Paul's story>",
                                 ])
        # Funny case - because the ForeignKey cannot be null,
        # existing members of the set must remain.
        self.r.article_set.set([new_article])
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: This is a test>",
                                 ])
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])

    def test_assign(self):
        """Assigning to the reverse relation via the descriptor (legacy API)."""
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story",
                                                  pub_date=datetime.date(2006, 1, 17))
        # Assign the article to the reporter directly using the descriptor.
        new_article2.reporter = self.r
        new_article2.save()
        self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        # Set the article back again using set descriptor.
        self.r2.article_set = [new_article, new_article2]
        self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
        self.assertQuerysetEqual(self.r2.article_set.all(),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: Paul's story>",
                                 ])
        # Funny case - assignment notation can only go so far; because the
        # ForeignKey cannot be null, existing members of the set must remain.
        self.r.article_set = [new_article]
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: This is a test>",
                                 ])
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Reporter cannot be null - there should not be a clear or remove method
        self.assertFalse(hasattr(self.r2.article_set, 'remove'))
        self.assertFalse(hasattr(self.r2.article_set, 'clear'))

    def test_assign_unsaved_check_override(self):
        """
        #24495 - Assigning an unsaved object to a ForeignKey
        should be allowed when the allow_unsaved_instance_assignment
        attribute has been set to True.
        """
        class UnsavedForeignKey(models.ForeignKey):
            # A ForeignKey which can point to an unsaved object
            allow_unsaved_instance_assignment = True

        class Band(models.Model):
            name = models.CharField(max_length=50)

        class BandMember(models.Model):
            band = UnsavedForeignKey(Band, models.CASCADE)
            first_name = models.CharField(max_length=50)
            last_name = models.CharField(max_length=50)

        beatles = Band(name='The Beatles')
        john = BandMember(first_name='John', last_name='Lennon')
        # This should not raise an exception as the ForeignKey between member
        # and band has allow_unsaved_instance_assignment=True.
        john.band = beatles
        self.assertEqual(john.band, beatles)

    def test_selects(self):
        """Forward filtering across the relation with __ lookups."""
        self.r.article_set.create(headline="John's second story",
                                  pub_date=datetime.date(2005, 7, 29))
        self.r2.article_set.create(headline="Paul's story",
                                   pub_date=datetime.date(2006, 1, 17))
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
                                 ["<Article: This is a test>"])
        self.assertEqual(self.r.article_set.count(), 2)
        self.assertEqual(self.r2.article_set.count(), 1)
        # Get articles by id
        self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
                                 ["<Article: This is a test>"])
        self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
                                 ["<Article: This is a test>"])
        # Query on an article property
        self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
                                 ["<Article: This is a test>"])
        # The API automatically follows relationships as far as you need.
        # Use double underscores to separate relationships.
        # This works as many levels deep as you want. There's no limit.
        # Find all Articles for any Reporter whose first name is "John".
        self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: This is a test>",
                                 ])
        # Check that implied __exact also works
        self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
                                 [
                                     "<Article: John's second story>",
                                     "<Article: This is a test>",
                                 ])
        # Query twice over the related field.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John',
                                   reporter__last_name__exact='Smith'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # The underlying query only makes one join when a related table is referenced twice.
        queryset = Article.objects.filter(reporter__first_name__exact='John',
                                          reporter__last_name__exact='Smith')
        self.assertNumQueries(1, list, queryset)
        self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
        # The automatically joined table has a predictable name.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John').extra(
                where=["many_to_one_reporter.last_name='Smith'"]),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
        self.assertQuerysetEqual(
            (Article.objects
             .filter(reporter__first_name__exact='John')
             .extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Find all Articles for a Reporter.
        # Use direct ID check, pk check, and object comparison
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__id__exact=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__pk=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        # You can also use a queryset instead of a literal list of instances.
        # The queryset must be reduced to a list of values using values(),
        # then converted into a query
        self.assertQuerysetEqual(
            Article.objects.filter(
                reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
            ).distinct(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])

    def test_reverse_selects(self):
        """Filtering Reporters through the reverse side of the relation."""
        a3 = Article.objects.create(id=None, headline="Third article",
                                    pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        Article.objects.create(id=None, headline="Fourth article",
                               pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # Reporters can be queried
        self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
                                 ["<Reporter: John Smith>"])
        # Reporters can query in opposite direction of ForeignKey definition
        self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T'),
            ["<Reporter: John Smith>", "<Reporter: John Smith>"],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T').distinct(),
            ["<Reporter: John Smith>"])
        # Counting in the opposite direction works in conjunction with distinct()
        self.assertEqual(
            Reporter.objects.filter(article__headline__startswith='T').count(), 2)
        self.assertEqual(
            Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
        # Queries can go round in circles.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John'),
            [
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
            ],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
            ["<Reporter: John Smith>"])
        # Check that implied __exact also works.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter=self.r).distinct(),
            ["<Reporter: John Smith>"])
        # It's possible to use values() calls across many-to-one relations.
        # (Note, too, that we clear the ordering here so as not to drag the
        # 'headline' field into the columns being used to determine uniqueness)
        d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
        self.assertEqual([d],
                         list(Article.objects.filter(reporter=self.r).distinct().order_by()
                              .values('reporter__first_name', 'reporter__last_name')))

    def test_select_related(self):
        # Check that Article.objects.select_related().dates() works properly when
        # there are multiple Articles with the same date but different foreign-key
        # objects (Reporters).
        r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
        r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
        Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
        Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
                         [
                             datetime.date(1980, 4, 23),
                             datetime.date(2005, 7, 27),
                         ])
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
                         [
                             datetime.date(1980, 4, 1),
                             datetime.date(2005, 7, 1),
                         ])
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
                         [
                             datetime.date(1980, 1, 1),
                             datetime.date(2005, 1, 1),
                         ])

    def test_delete(self):
        """Deleting a Reporter cascades to its Articles; delete() works via JOINs."""
        self.r.article_set.create(headline="John's second story",
                                  pub_date=datetime.date(2005, 7, 29))
        self.r2.article_set.create(headline="Paul's story",
                                   pub_date=datetime.date(2006, 1, 17))
        Article.objects.create(id=None, headline="Third article",
                               pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        Article.objects.create(id=None, headline="Fourth article",
                               pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # If you delete a reporter, his articles will be deleted.
        self.assertQuerysetEqual(Article.objects.all(),
                                 [
                                     "<Article: Fourth article>",
                                     "<Article: John's second story>",
                                     "<Article: Paul's story>",
                                     "<Article: Third article>",
                                     "<Article: This is a test>",
                                 ])
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
                                 [
                                     "<Reporter: John Smith>",
                                     "<Reporter: Paul Jones>",
                                 ])
        self.r2.delete()
        self.assertQuerysetEqual(Article.objects.all(),
                                 [
                                     "<Article: Fourth article>",
                                     "<Article: John's second story>",
                                     "<Article: Third article>",
                                     "<Article: This is a test>",
                                 ])
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
                                 ["<Reporter: John Smith>"])
        # You can delete using a JOIN in the query.
        Reporter.objects.filter(article__headline__startswith='This').delete()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
        self.assertQuerysetEqual(Article.objects.all(), [])

    def test_explicit_fk(self):
        # Create a new Article with get_or_create using an explicit value
        # for a ForeignKey.
        a2, created = Article.objects.get_or_create(id=None,
                                                    headline="John's second test",
                                                    pub_date=datetime.date(2011, 5, 7),
                                                    reporter_id=self.r.id)
        self.assertTrue(created)
        self.assertEqual(a2.reporter.id, self.r.id)
        # You can specify filters containing the explicit FK value.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter_id__exact=self.r.id),
            [
                "<Article: John's second test>",
                "<Article: This is a test>",
            ])
        # Create an Article by Paul for the same date.
        a3 = Article.objects.create(id=None, headline="Paul's commentary",
                                    pub_date=datetime.date(2011, 5, 7),
                                    reporter_id=self.r2.id)
        self.assertEqual(a3.reporter.id, self.r2.id)
        # Get should respect explicit foreign keys as well.
        self.assertRaises(MultipleObjectsReturned,
                          Article.objects.get, reporter_id=self.r.id)
        self.assertEqual(repr(a3),
                         repr(Article.objects.get(reporter_id=self.r2.id,
                                                  pub_date=datetime.date(2011, 5, 7))))

    def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
        self.r.cached_query = Article.objects.filter(reporter=self.r)
        self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")

    def test_manager_class_caching(self):
        """The dynamically built related-manager class is created only once."""
        r1 = Reporter.objects.create(first_name='Mike')
        r2 = Reporter.objects.create(first_name='John')
        # Same twice
        self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
        # Same as each other
        self.assertIs(r1.article_set.__class__, r2.article_set.__class__)

    def test_create_relation_with_ugettext_lazy(self):
        """Lazy translation strings are usable as field values on create()."""
        reporter = Reporter.objects.create(first_name='John',
                                           last_name='Smith',
                                           email='john.smith@example.com')
        lazy = ugettext_lazy('test')
        reporter.article_set.create(headline=lazy,
                                    pub_date=datetime.date(2011, 6, 10))
        notlazy = six.text_type(lazy)
        article = reporter.article_set.get()
        self.assertEqual(article.headline, notlazy)

    def test_values_list_exception(self):
        """Bad lookup names raise FieldError listing the valid choices."""
        expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
        self.assertRaisesMessage(FieldError,
                                 expected_message % ', '.join(sorted(f.name for f in Reporter._meta.get_fields())),
                                 Article.objects.values_list,
                                 'reporter__notafield')
        self.assertRaisesMessage(FieldError,
                                 expected_message % ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields())),
                                 Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
                                 'notafield')

    def test_fk_assignment_and_related_object_cache(self):
        # Tests of ForeignKey assignment and the related-object cache (see #6886).
        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)
        # Look up the object again so that we get a "fresh" object.
        c = Child.objects.get(name="Child")
        p = c.parent
        # Accessing the related object again returns the exactly same object.
        self.assertIs(c.parent, p)
        # But if we kill the cache, we get a new object.
        del c._parent_cache
        self.assertIsNot(c.parent, p)
        # Assigning a new object results in that object getting cached immediately.
        p2 = Parent.objects.create(name="Parent 2")
        c.parent = p2
        self.assertIs(c.parent, p2)
        # Assigning None succeeds if field is null=True.
        p.bestchild = None
        self.assertIsNone(p.bestchild)
        # bestchild should still be None after saving.
        p.save()
        self.assertIsNone(p.bestchild)
        # bestchild should still be None after fetching the object again.
        p = Parent.objects.get(name="Parent")
        self.assertIsNone(p.bestchild)
        # Assigning None fails: Child.parent is null=False.
        self.assertRaises(ValueError, setattr, c, "parent", None)
        # You also can't assign an object of the wrong type here
        self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))
        # Nor can you explicitly assign None to Child.parent during object
        # creation (regression for #9649).
        self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
        self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)
        # Creation using keyword argument should cache the related object.
        p = Parent.objects.get(name="Parent")
        c = Child(parent=p)
        self.assertIs(c.parent, p)
        # Creation using keyword argument and unsaved related instance (#8070).
        p = Parent()
        with self.assertRaisesMessage(ValueError,
                                      'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
                                      % (p, Child.parent.field.remote_field.model._meta.object_name)):
            Child(parent=p)
        with self.assertRaisesMessage(ValueError,
                                      'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
                                      % (p, Child.parent.field.remote_field.model._meta.object_name)):
            ToFieldChild(parent=p)
        # Creation using attname keyword argument and an id will cause the
        # related object to be fetched.
        p = Parent.objects.get(name="Parent")
        c = Child(parent_id=p.id)
        self.assertIsNot(c.parent, p)
        self.assertEqual(c.parent, p)

    def test_multiple_foreignkeys(self):
        # Test of multiple ForeignKeys to the same model (bug #7125).
        c1 = Category.objects.create(name='First')
        c2 = Category.objects.create(name='Second')
        c3 = Category.objects.create(name='Third')
        r1 = Record.objects.create(category=c1)
        r2 = Record.objects.create(category=c1)
        r3 = Record.objects.create(category=c2)
        r4 = Record.objects.create(category=c2)
        r5 = Record.objects.create(category=c3)
        Relation.objects.create(left=r1, right=r2)
        Relation.objects.create(left=r3, right=r4)
        Relation.objects.create(left=r1, right=r3)
        Relation.objects.create(left=r5, right=r2)
        Relation.objects.create(left=r3, right=r2)
        q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
        self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
        q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
        self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)
        self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c)

    def test_fk_instantiation_outside_model(self):
        # Regression for #12190 -- Should be able to instantiate a FK outside
        # of a model, and interrogate its related field.
        cat = models.ForeignKey(Category, models.CASCADE)
        self.assertEqual('id', cat.remote_field.get_related_field().name)

    def test_relation_unsaved(self):
        # Test that the <field>_set manager does not join on Null value fields (#17541)
        Third.objects.create(name='Third 1')
        Third.objects.create(name='Third 2')
        th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we won't even
        # execute a query in this case.
        with self.assertNumQueries(0):
            self.assertEqual(th.child_set.count(), 0)
        th.save()
        # Now the model is saved, so we will need to execute a query.
        with self.assertNumQueries(1):
            self.assertEqual(th.child_set.count(), 0)

    def test_related_object(self):
        """Related-object access uses the base manager, not the default one."""
        public_school = School.objects.create(is_public=True)
        public_student = Student.objects.create(school=public_school)
        private_school = School.objects.create(is_public=False)
        private_student = Student.objects.create(school=private_school)
        # Only one school is available via all() due to the custom default manager.
        self.assertQuerysetEqual(
            School.objects.all(),
            ["<School: School object>"]
        )
        self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
        # its related school even if the default manager doesn't normally
        # allow it.
        self.assertEqual(private_student.school, private_school)
        # If the manager is marked "use_for_related_fields", it'll get used instead
        # of the "bare" queryset. Usually you'd define this as a property on the class,
        # but this approximates that in a way that's easier in tests.
        School.objects.use_for_related_fields = True
        try:
            private_student = Student.objects.get(pk=private_student.pk)
            self.assertRaises(School.DoesNotExist, lambda: private_student.school)
        finally:
            School.objects.use_for_related_fields = False

    def test_hasattr_related_object(self):
        # The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of `AttributeError`
        # refs #21563
        self.assertFalse(hasattr(Article(), 'reporter'))
| bsd-3-clause |
spikelynch/bots | botclient/botclient.py | 1 | 4550 | """
bot - a basic Bot class
"""
from twitterbot import TwitterBot
from mastodonbot import MastodonBot
import argparse, yaml, pystache, random, time, sys
SERVICES = {
'Twitter': TwitterBot,
'Mastodon': MastodonBot
}
class Bot(object):
    """A bot base class.

    This is a base class for bots which provides methods to do the
    authentication and posting (of text-only tweets and ones with images),
    plus a few commonly-needed functions like parsing command-line arguments,
    reading a config file and filling out templates.

    Attributes:
        ap (ArgumentParser): the argument parser
        args (Namespace): the results of the argument parse
        cf (dict): the values read from the config file
        api: the service backend (TwitterBot or MastodonBot), set in configure()
    """

    def __init__(self):
        """Create a Bot.

        The base class __init__ creates an argparse.ArgumentParser and adds
        three arguments to it: --service (Twitter or Mastodon), --config
        (mandatory, the config file name) and --dry-run (optional flag to just
        create the post and print to stdout rather than post it).

        To add more arguments in your subclass, call super().__init__ at the
        start of your class's __init__ method.

        This method doesn't parse the args (this is so that the subclass can add
        new ones) - that's done in the configure method.
        """
        self.ap = argparse.ArgumentParser()
        self.ap.add_argument('-s', '--service', default="Twitter", help="Twitter or Mastodon")
        self.ap.add_argument('-c', '--config', required=True, type=str, help="Config file")
        self.ap.add_argument('-d', '--dry-run', action='store_true', help="Don't post")

    def configure(self, mandatory_fields=None):
        """Parse command-line arguments and read the config file.

        Args:
            mandatory_fields (list): extra config keys (beyond the ones the
                service backend needs for auth) that must be present.

        If there are any missing arguments or reading the config file fails for
        any reason, this calls sys.exit().
        """
        self.args = self.ap.parse_args()
        if self.args.service not in SERVICES:
            print("Don't know how to post to {}".format(self.args.service))
            sys.exit(-1)
        self.api = SERVICES[self.args.service]()
        self.cf = None
        with open(self.args.config) as cf:
            try:
                # safe_load: config files are plain data, and yaml.load
                # without an explicit Loader is unsafe and deprecated.
                self.cf = yaml.safe_load(cf)
            except yaml.YAMLError as exc:
                print("%s parse error: %s" % (self.args.config, exc))
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    print("Error position: (%s:%s)" % (mark.line + 1, mark.column + 1))
        if not self.cf:
            print("Config error")
            sys.exit(-1)
        # Copy the backend's field list so that appending the caller's
        # mandatory fields doesn't mutate the api object's own list.
        fields = list(self.api.auth_cf_fields)
        if mandatory_fields:
            fields += mandatory_fields
        cf_missing = False
        for field in fields:
            if field not in self.cf:
                print("Missing mandatory field: %s" % field)
                cf_missing = True
        if cf_missing:
            sys.exit(-1)

    def render(self):
        """Render a moustache template with this object and return the result."""
        pyr = pystache.Renderer()
        return pyr.render(self)

    def post(self, text, options=None):
        """Post a string, or write it to stdout if we're dry-run.

        Args:
            text (str): the status text to post
            options: extra service-specific options passed through to the api

        Returns:
            The api's post result, or True on a dry run.
        """
        if self.args.dry_run:
            print("Dry run: not posting")
            print("Status: %s" % text)
            if options:
                print("Options: %s" % options)
            return True
        if self.api.auth(self.cf):
            return self.api.post(text, options)

    def post_image(self, imgfile, text, options=None):
        """Post a status with one attached image.

        Args:
            imgfile (str): image file
            text (str): text part of post
            options: extra service-specific options passed through to the api

        Returns:
            status (bool): True if the post was successful
        """
        if self.args.dry_run:
            print("Dry run: not posting")
            print("Imagefile: %s" % imgfile)
            print("Text: %s" % text)
            if options:
                print("Options: %s" % options)
            return True
        if self.api.auth(self.cf):
            return self.api.post_image(imgfile, text, options)

    def wait(self):
        """Wait for a random interval.

        If there's a config variable 'pause', this method picks a random
        number between 0 and pause, and sleeps for that number of seconds.
        This is a simple way to make bot timing be less metronomic: you
        can set a cron job and then have a bit of scatter."""
        if 'pause' in self.cf:
            limit = int(self.cf['pause'])
            # Guard against pause <= 0: randrange(0, 0) raises ValueError.
            pause = random.randrange(0, limit) if limit > 0 else 0
            print("Waiting for {}".format(pause))
            time.sleep(pause)
| gpl-2.0 |
umitproject/openmonitor-aggregator | django/contrib/gis/gdal/prototypes/errcheck.py | 404 | 4207 | """
This module houses the error-checking routines used by the GDAL
ctypes prototypes.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.gdal.error import check_err, OGRException, SRSException
from django.contrib.gis.gdal.libgdal import lgdal
# Helper routines for retrieving pointers and/or values from
# arguments passed in by reference.
def arg_byref(args, offset=-1):
    "Returns the pointer argument's by-reference value."
    # byref() arguments expose the wrapped ctypes object via `_obj`;
    # `.value` then unwraps the primitive it holds.
    return args[offset]._obj.value
def ptr_byref(args, offset=-1):
    "Returns the pointer argument passed in by-reference."
    # Unlike arg_byref, this returns the ctypes object itself (not `.value`),
    # so callers can inspect or free the underlying pointer.
    return args[offset]._obj
def check_bool(result, func, cargs):
    """Returns the boolean evaluation of the value.

    `func` and `cargs` are unused; they are part of the ctypes
    errcheck callback signature.
    """
    return bool(result)
### String checking Routines ###
def check_const_string(result, func, cargs, offset=None):
    """
    Similar functionality to `check_string`, but does not free the pointer.
    """
    if offset:
        # An error code was returned; verify it, then pull the string
        # out of the by-reference argument at `offset`.
        check_err(result)
        ptr = ptr_byref(cargs, offset)
        return ptr.value
    else:
        # The function returned the string directly.
        return result
def check_string(result, func, cargs, offset=-1, str_result=False):
    """
    Checks the string output returned from the given function, and frees
    the string pointer allocated by OGR.  The `str_result` keyword
    may be used when the result is the string pointer, otherwise
    the OGR error code is assumed.  The `offset` keyword may be used
    to extract the string pointer passed in by-reference at the given
    slice offset in the function arguments.
    """
    if str_result:
        # For routines that return a string.
        ptr = result
        if not ptr: s = None
        else: s = string_at(result)
    else:
        # Error-code return specified.
        check_err(result)
        ptr = ptr_byref(cargs, offset)
        # Getting the string value
        s = ptr.value
    # Correctly freeing the allocated memory behind GDAL pointer
    # w/the VSIFree routine.
    if ptr: lgdal.VSIFree(ptr)
    return s
### DataSource, Layer error-checking ###
### Envelope checking ###
def check_envelope(result, func, cargs, offset=-1):
    "Checks a function that returns an OGR Envelope by reference."
    # No error-code verification here; just return the by-reference envelope.
    env = ptr_byref(cargs, offset)
    return env
### Geometry error-checking routines ###
def check_geom(result, func, cargs):
    "Checks a function that returns a geometry."
    # OGR_G_Clone may return an integer, even though the
    # restype is set to c_void_p
    # NOTE: `long` makes this Python 2-only code.
    if isinstance(result, (int, long)):
        result = c_void_p(result)
    if not result:
        raise OGRException('Invalid geometry pointer returned from "%s".' % func.__name__)
    return result
def check_geom_offset(result, func, cargs, offset=-1):
    "Checks the geometry at the given offset in the C parameter list."
    # `result` is an error code; the geometry comes back by reference.
    check_err(result)
    geom = ptr_byref(cargs, offset=offset)
    return check_geom(geom, func, cargs)
### Spatial Reference error-checking routines ###
def check_srs(result, func, cargs):
    "Checks a function that returns a spatial reference pointer."
    # Same integer-vs-pointer coercion as check_geom (Python 2 `long`).
    if isinstance(result, (int, long)):
        result = c_void_p(result)
    if not result:
        raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__)
    return result
### Other error-checking routines ###
def check_arg_errcode(result, func, cargs):
    """
    The error code is returned in the last argument, by reference.
    Check its value with `check_err` before returning the result.
    """
    check_err(arg_byref(cargs))
    return result
return result
def check_errcode(result, func, cargs):
    """
    Check the error code returned (c_int).
    """
    # check_err raises on failure; nothing useful to return on success.
    check_err(result)
    return
def check_pointer(result, func, cargs):
    "Makes sure the result pointer is valid."
    # Same integer-vs-pointer coercion as check_geom (Python 2 `long`).
    if isinstance(result, (int, long)):
        result = c_void_p(result)
    if bool(result):
        return result
    else:
        raise OGRException('Invalid pointer returned from "%s"' % func.__name__)
def check_str_arg(result, func, cargs):
    """
    This is for the OSRGet[Angular|Linear]Units functions, which
    require that the returned string pointer not be freed.  This
    returns both the double and string values.
    """
    dbl = result
    ptr = cargs[-1]._obj
    return dbl, ptr.value
| agpl-3.0 |
tjw/swift | utils/process-stats-dir.py | 6 | 24786 | #!/usr/bin/python
#
# ==-- process-stats-dir - summarize one or more Swift -stats-output-dirs --==#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014-2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ==------------------------------------------------------------------------==#
#
# This file processes the contents of one or more directories generated by
# `swiftc -stats-output-dir` and emits summary data, traces etc. for analysis.
import argparse
import csv
import itertools
import json
import os
import platform
import re
import sys
import time
import urllib
import urllib2
from collections import namedtuple
from operator import attrgetter
from jobstats import load_stats_dir, merge_all_jobstats
# Pattern matching the leading "Module." qualifier of a stat name.
# Raw string avoids the invalid-escape-sequence warning for '\w'.
MODULE_PAT = re.compile(r'^(\w+)\.')


def module_name_of_stat(name):
    """Return the module prefix of a module-qualified stat name.

    Raises AttributeError when `name` has no "Module." prefix.
    """
    return re.match(MODULE_PAT, name).groups()[0]


def stat_name_minus_module(name):
    """Return `name` with any leading "Module." qualifier stripped."""
    return re.sub(MODULE_PAT, '', name)
# Perform any custom processing of args here, in particular the
# select_stats_from_csv_baseline step, which is a bit subtle.
def vars_of_args(args):
    """Return vars(args), deriving 'select_stat' from a CSV baseline
    when --select-stats-from-csv-baseline was given."""
    vargs = vars(args)
    baseline_file = args.select_stats_from_csv_baseline
    if baseline_file is not None:
        baseline = read_stats_dict_from_csv(baseline_file)
        names = baseline.keys()
        # Sniff baseline stat-names to figure out if they're module-qualified
        # even when the user isn't asking us to _output_ module-grouped data.
        all_qualified = all(len(k.split('.')) == 3 for k in names)
        if args.group_by_module or all_qualified:
            vargs['select_stat'] = set(stat_name_minus_module(k)
                                       for k in names)
        else:
            vargs['select_stat'] = names
    return vargs
# Passed args with 2-element remainder ["old", "new"], return a list of tuples
# of the form [(name, (oldstats, newstats))] where each name is a common subdir
# of each of "old" and "new", and the stats are those found in the respective
# dirs.
def load_paired_stats_dirs(args):
    """Return [(name, (old_stats, new_stats))] for every subdirectory
    common to the two dirs-of-stats-dirs in args.remainder."""
    assert(len(args.remainder) == 2)
    (old_root, new_root) = args.remainder
    vargs = vars_of_args(args)
    pairs = []
    for entry in sorted(os.listdir(old_root)):
        old_dir = os.path.join(old_root, entry)
        new_dir = os.path.join(new_root, entry)
        both_present = (os.path.exists(old_dir) and os.path.isdir(old_dir) and
                        os.path.exists(new_dir) and os.path.isdir(new_dir))
        if not both_present:
            continue
        old_stats = load_stats_dir(old_dir, **vargs)
        new_stats = load_stats_dir(new_dir, **vargs)
        # Skip pairs where either side produced no stats at all.
        if len(old_stats) == 0 or len(new_stats) == 0:
            continue
        pairs.append((entry, (old_stats, new_stats)))
    return pairs
def write_catapult_trace(args):
    """Dump all jobs from the given stats dirs to args.output as a
    catapult-compatible JSON trace, ordered by start time."""
    vargs = vars_of_args(args)
    jobs = []
    for path in args.remainder:
        jobs.extend(load_stats_dir(path, **vargs))
    jobs.sort(key=attrgetter('start_usec'))
    # Assign sequential job ids after sorting so ids follow start order.
    for (i, job) in enumerate(jobs):
        job.jobid = i
    json.dump([j.to_catapult_trace_obj() for j in jobs], args.output)
def write_lnt_values(args):
    """
    Emit merged stats from each dir in LNT test-summary form: print the
    JSON to args.output, or POST it when --lnt-submit gives a server URL.
    """
    vargs = vars_of_args(args)
    for d in args.remainder:
        stats = load_stats_dir(d, **vargs)
        merged = merge_all_jobstats(stats, **vargs)
        j = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(j, args.output, indent=4)
        else:
            url = args.lnt_submit
            print "\nsubmitting to LNT server: " + url
            # LNT's submission endpoint expects the report as a
            # form-encoded 'input_data' field, with 'commit' set.
            json_report = {'input_data': json.dumps(j), 'commit': '1'}
            data = urllib.urlencode(json_report)
            response_str = urllib2.urlopen(urllib2.Request(url, data))
            response = json.loads(response_str.read())
            print "### response:"
            print response
            # A 'success' key marks acceptance; otherwise report the
            # server's error and abort with a failure status.
            if 'success' in response:
                print "server response:\tSuccess"
            else:
                print "server response:\tError"
                print "error:\t", response['error']
                sys.exit(1)
def show_paired_incrementality(args):
    """Write a TSV comparing driver incrementality between paired
    old/new stats dirs, one row per common subdirectory."""
    fieldnames = ["old_pct", "old_skip",
                  "new_pct", "new_skip",
                  "delta_pct", "delta_skip",
                  "name"]
    writer = csv.DictWriter(args.output, fieldnames, dialect='excel-tab')
    writer.writeheader()
    vargs = vars_of_args(args)
    for (name, (oldstats, newstats)) in load_paired_stats_dirs(args):
        # Only driver jobs carry incrementality information.
        old_driver = merge_all_jobstats((j for j in oldstats
                                         if j.is_driver_job()), **vargs)
        new_driver = merge_all_jobstats((j for j in newstats
                                         if j.is_driver_job()), **vargs)
        if old_driver is None or new_driver is None:
            continue
        old_pct = old_driver.incrementality_percentage()
        new_pct = new_driver.incrementality_percentage()
        old_skip = old_driver.driver_jobs_skipped()
        new_skip = new_driver.driver_jobs_skipped()
        writer.writerow(dict(name=name,
                             old_pct=old_pct, old_skip=old_skip,
                             new_pct=new_pct, new_skip=new_skip,
                             delta_pct=(new_pct - old_pct),
                             delta_skip=(new_skip - old_skip)))
def show_incrementality(args):
    """Write a TSV of incrementality percentages, one row per driver job
    found in each stats dir in args.remainder."""
    fieldnames = ["incrementality", "name"]
    writer = csv.DictWriter(args.output, fieldnames, dialect='excel-tab')
    writer.writeheader()
    vargs = vars_of_args(args)
    for path in args.remainder:
        for job in load_stats_dir(path, **vargs):
            if job.is_driver_job():
                writer.writerow(dict(name=os.path.basename(path),
                                     incrementality=job.incrementality_percentage()))
def diff_and_pct(old, new):
    """Return (delta, delta_pct) from `old` to `new`.

    A zero baseline is treated specially: (0, 0.0) when both are zero,
    otherwise (new, 100.0)."""
    if old == 0:
        return (0, 0.0) if new == 0 else (new, 100.0)
    delta = new - old
    # Percentage change relative to the old value, to 2 decimal places.
    return (delta, round((float(delta) / float(old)) * 100.0, 2))
def update_epoch_value(d, name, epoch, value):
    """Store (epoch, value) in d[name], resolving conflicts.

    An existing entry with a newer epoch wins; an identical value keeps
    the existing epoch. Returns (epoch, value, changed) where changed is
    1 only when a differing value replaced an older one."""
    changed = 0
    if name in d:
        (prev_epoch, prev_value) = d[name]
        if prev_epoch > epoch:
            # Existing entry is newer: keep it and report.
            print("note: keeping newer value %d from epoch %d for %s"
                  % (prev_value, prev_epoch, name))
            (epoch, value) = (prev_epoch, prev_value)
        elif prev_value == value:
            # Same value: retain the original epoch.
            epoch = prev_epoch
        else:
            (_, delta_pct) = diff_and_pct(prev_value, value)
            print("note: changing value %d -> %d (%.2f%%) for %s" %
                  (prev_value, value, delta_pct, name))
            changed = 1
    d[name] = (epoch, value)
    return (epoch, value, changed)
def read_stats_dict_from_csv(f, select_stat=''):
    """Read a {name: (epoch, value)} dict from a tab-separated baseline
    CSV, keeping only names matching any pattern in `select_stat`."""
    infieldnames = ["epoch", "name", "value"]
    reader = csv.DictReader(f, infieldnames,
                            dialect='excel-tab',
                            quoting=csv.QUOTE_NONNUMERIC)
    # An empty selection means "match everything".
    pattern = '.*' if len(select_stat) == 0 else '|'.join(select_stat)
    sre = re.compile(pattern)
    d = {}
    for row in reader:
        name = row["name"]
        if sre.search(name) is None:
            continue
        # Duplicate names are resolved by epoch inside update_epoch_value.
        update_epoch_value(d, name, int(row["epoch"]), int(row["value"]))
    return d
# The idea here is that a "baseline" is a (tab-separated) CSV file full of
# the counters you want to track, each prefixed by an epoch timestamp of
# the last time the value was reset.
#
# When you set a fresh baseline, all stats in the provided stats dir are
# written to the baseline. When you set against an _existing_ baseline,
# only the counters mentioned in the existing baseline are updated, and
# only if their values differ.
#
# Finally, since it's a line-oriented CSV file, you can put:
#
# mybaseline.csv merge=union
#
# in your .gitattributes file, and forget about merge conflicts. The reader
# function above will take the later epoch anytime it detects duplicates,
# so union-merging is harmless. Duplicates will be eliminated whenever the
# next baseline-set is done.
def set_csv_baseline(args):
    """
    Write merged stats from args.remainder to the CSV baseline file.
    If the baseline already exists, only its existing entries are
    updated (see the comment block above for the merge semantics).
    """
    existing = None
    vargs = vars_of_args(args)
    if os.path.exists(args.set_csv_baseline):
        with open(args.set_csv_baseline, "r") as f:
            ss = vargs['select_stat']
            existing = read_stats_dict_from_csv(f, select_stat=ss)
            print ("updating %d baseline entries in %s" %
                   (len(existing), args.set_csv_baseline))
    else:
        print "making new baseline " + args.set_csv_baseline
    fieldnames = ["epoch", "name", "value"]
    with open(args.set_csv_baseline, "wb") as f:
        out = csv.DictWriter(f, fieldnames, dialect='excel-tab',
                             quoting=csv.QUOTE_NONNUMERIC)
        # Merge every stat from every provided stats dir into one record.
        m = merge_all_jobstats((s for d in args.remainder
                                for s in load_stats_dir(d, **vargs)),
                               **vargs)
        if m is None:
            print "no stats found"
            return 1
        changed = 0
        newepoch = int(time.time())
        for name in sorted(m.stats.keys()):
            epoch = newepoch
            value = m.stats[name]
            if existing is not None:
                # Updating an existing baseline: skip stats it doesn't
                # track, and let epoch-resolution decide the rest.
                if name not in existing:
                    continue
                (epoch, value, chg) = update_epoch_value(existing, name,
                                                         epoch, value)
                changed += chg
            out.writerow(dict(epoch=int(epoch),
                              name=name,
                              value=int(value)))
        if existing is not None:
            print "changed %d entries in baseline" % changed
    return 0
# One comparison row per stat: old/new values plus the absolute delta
# and the percentage delta.
OutputRow = namedtuple("OutputRow",
                       ["name", "old", "new",
                        "delta", "delta_pct"])
def compare_stats(args, old_stats, new_stats):
    """Yield an OutputRow for each stat in old_stats, comparing it to
    new_stats (missing new values are treated as 0)."""
    for name in sorted(old_stats.keys()):
        old_value = old_stats[name]
        new_value = new_stats.get(name, 0)
        (delta, delta_pct) = diff_and_pct(old_value, new_value)
        yield OutputRow(name=name,
                        old=int(old_value), new=int(new_value),
                        delta=int(delta),
                        delta_pct=delta_pct)
# Tri-state classification of a comparison row.
IMPROVED = -1
UNCHANGED = 0
REGRESSED = 1


def row_state(row, args):
    """Classify `row` as IMPROVED, UNCHANGED or REGRESSED against the
    thresholds carried by `args`."""
    pct_exceeded = abs(row.delta_pct) > args.delta_pct_thresh
    if (row.name.startswith("time.") or '.time.' in row.name):
        # Timers are judged as changing if they exceed
        # the percentage _and_ absolute-time thresholds
        usec_exceeded = abs(row.delta) > args.delta_usec_thresh
        if pct_exceeded and usec_exceeded:
            return (REGRESSED if row.delta > 0 else IMPROVED)
    elif pct_exceeded:
        return (REGRESSED if row.delta > 0 else IMPROVED)
    return UNCHANGED
def write_comparison(args, old_stats, new_stats):
    """
    Write a comparison of old_stats vs new_stats to args.output, either
    as markdown (with collapsible <details> sections) or as a TSV of the
    changed rows. Returns the number of regressed stats.
    """
    rows = list(compare_stats(args, old_stats, new_stats))
    sort_key = (attrgetter('delta_pct')
                if args.sort_by_delta_pct
                else attrgetter('name'))
    # Partition rows by their classification once, up front.
    regressed = [r for r in rows if row_state(r, args) == REGRESSED]
    unchanged = [r for r in rows if row_state(r, args) == UNCHANGED]
    improved = [r for r in rows if row_state(r, args) == IMPROVED]
    regressions = len(regressed)
    if args.markdown:
        # Pick a human-friendly unit for microsecond timer values.
        def format_time(v):
            if abs(v) > 1000000:
                return "{:.1f}s".format(v / 1000000.0)
            elif abs(v) > 1000:
                return "{:.1f}ms".format(v / 1000.0)
            else:
                return "{:.1f}us".format(v)

        # Render one OutputRow field as a markdown table cell.
        def format_field(field, row):
            if field == 'name':
                if args.group_by_module:
                    return stat_name_minus_module(row.name)
                else:
                    return row.name
            elif field == 'delta_pct':
                s = str(row.delta_pct) + "%"
                if args.github_emoji:
                    if row_state(row, args) == REGRESSED:
                        s += " :no_entry:"
                    elif row_state(row, args) == IMPROVED:
                        s += " :white_check_mark:"
                return s
            else:
                v = int(vars(row)[field])
                if row.name.startswith('time.'):
                    return format_time(v)
                else:
                    return "{:,d}".format(v)

        # Emit a right-aligned markdown table of rows.
        def format_table(elts):
            out = args.output
            out.write('\n')
            out.write(' | '.join(OutputRow._fields))
            out.write('\n')
            out.write(' | '.join('---:' for _ in OutputRow._fields))
            out.write('\n')
            for e in elts:
                out.write(' | '.join(format_field(f, e)
                                     for f in OutputRow._fields))
                out.write('\n')

        # Emit a collapsible <details> section, optionally grouped by
        # module, with tables inside.
        def format_details(name, elts, is_closed):
            out = args.output
            details = '<details>\n' if is_closed else '<details open>\n'
            out.write(details)
            out.write('<summary>%s (%d)</summary>\n'
                      % (name, len(elts)))
            if args.group_by_module:
                def keyfunc(e):
                    return module_name_of_stat(e.name)
                # groupby needs its input pre-sorted by the group key.
                elts.sort(key=attrgetter('name'))
                for mod, group in itertools.groupby(elts, keyfunc):
                    groupelts = list(group)
                    groupelts.sort(key=sort_key, reverse=args.sort_descending)
                    out.write(details)
                    out.write('<summary>%s in %s (%d)</summary>\n'
                              % (name, mod, len(groupelts)))
                    format_table(groupelts)
                    out.write('</details>\n')
            else:
                elts.sort(key=sort_key, reverse=args.sort_descending)
                format_table(elts)
            out.write('</details>\n')

        closed_regressions = (args.close_regressions or len(regressed) == 0)
        format_details('Regressed', regressed, closed_regressions)
        format_details('Improved', improved, True)
        format_details('Unchanged (delta < %s%% or delta < %s)' %
                       (args.delta_pct_thresh,
                        format_time(args.delta_usec_thresh)),
                       unchanged, True)
    else:
        # TSV mode: emit only the rows that actually changed.
        rows.sort(key=sort_key, reverse=args.sort_descending)
        out = csv.DictWriter(args.output, OutputRow._fields,
                             dialect='excel-tab')
        out.writeheader()
        for row in rows:
            if row_state(row, args) != UNCHANGED:
                out.writerow(row._asdict())
    return regressions
def compare_to_csv_baseline(args):
    """Compare merged stats from args.remainder against a CSV baseline
    file; returns the number of regressions."""
    vargs = vars_of_args(args)
    baseline = read_stats_dict_from_csv(args.compare_to_csv_baseline,
                                        select_stat=vargs['select_stat'])
    merged = merge_all_jobstats((s for d in args.remainder
                                 for s in load_stats_dir(d, **vargs)),
                                **vargs)
    # Baseline entries are (epoch, value) pairs; drop the epochs.
    old_stats = dict((k, v) for (k, (_, v)) in baseline.items())
    return write_comparison(args, old_stats, merged.stats)
# Summarize immediate difference between two stats-dirs, optionally
def compare_stats_dirs(args):
    """Directly compare the two stats dirs named in args.remainder;
    returns the number of regressions."""
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")
    vargs = vars_of_args(args)
    (old_dir, new_dir) = args.remainder
    old_merged = merge_all_jobstats(load_stats_dir(old_dir, **vargs), **vargs)
    new_merged = merge_all_jobstats(load_stats_dir(new_dir, **vargs), **vargs)
    return write_comparison(args, old_merged.stats, new_merged.stats)
# Evaluate a boolean expression in terms of the provided stats-dir; all stats
# are projected into python dicts (thus variables in the eval expr) named by
# the last identifier in the stat definition. This means you can evaluate
# things like 'NumIRInsts < 1000' or
# 'NumTypesValidated == NumTypesDeserialized'
def evaluate(args):
    """Evaluate the boolean expression in args.evaluate over the merged
    counters of a single stats-dir; returns 0 when it holds, 1 otherwise.
    Timers are excluded from the evaluation environment."""
    if len(args.remainder) != 1:
        raise ValueError("Expected exactly 1 stats-dir to evaluate against")
    d = args.remainder[0]
    vargs = vars_of_args(args)
    merged = merge_all_jobstats(load_stats_dir(d, **vargs), **vargs)
    env = {}
    # Raw string avoids the invalid-escape-sequence warning for '\w'.
    ident = re.compile(r'(\w+)$')
    for (k, v) in merged.stats.items():
        if k.startswith("time.") or '.time.' in k:
            continue
        m = re.search(ident, k)
        if m:
            # Bind each stat under its last identifier, e.g. NumIRInsts.
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, v))
            env[i] = v
    try:
        if eval(args.evaluate, env):
            return 0
        else:
            print("evaluate condition failed: '%s'" % args.evaluate)
            return 1
    except Exception as e:
        print(e)
        return 1
# Evaluate a boolean expression in terms of deltas between the provided two
# stats-dirs; works like evaluate() above but on absolute differences
def evaluate_delta(args):
    """Evaluate the boolean expression in args.evaluate_delta over the
    absolute stat deltas between two stats-dirs; returns 0 when it
    holds, 1 otherwise. Timers are excluded."""
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs to evaluate-delta")
    (old, new) = args.remainder
    vargs = vars_of_args(args)
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)
    env = {}
    # Raw string avoids the invalid-escape-sequence warning for '\w'.
    ident = re.compile(r'(\w+)$')
    for r in compare_stats(args, old_stats.stats, new_stats.stats):
        if r.name.startswith("time.") or '.time.' in r.name:
            continue
        m = re.search(ident, r.name)
        if m:
            # Bind each delta under the stat's last identifier.
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, r.delta))
            env[i] = r.delta
    try:
        if eval(args.evaluate_delta, env):
            return 0
        else:
            print("evaluate-delta condition failed: '%s'" %
                  args.evaluate_delta)
            return 1
    except Exception as e:
        print(e)
        return 1
def main():
    """
    Parse command-line options, then dispatch to exactly one processing
    mode (catapult trace, incrementality summary, baseline set/compare,
    stats-dir compare, LNT output, or expression evaluation).
    """
    parser = argparse.ArgumentParser()
    # --- General options ---
    parser.add_argument("--verbose", action="store_true",
                        help="Report activity verbosely")
    parser.add_argument("--output", default="-",
                        type=argparse.FileType('wb', 0),
                        help="Write output to file")
    parser.add_argument("--paired", action="store_true",
                        help="Process two dirs-of-stats-dirs, pairwise")
    # --- Thresholds for classifying changes ---
    parser.add_argument("--delta-pct-thresh", type=float, default=0.01,
                        help="Percentage change required to report")
    parser.add_argument("--delta-usec-thresh", type=int, default=100000,
                        help="Absolute delta on times required to report")
    # --- LNT submission metadata ---
    parser.add_argument("--lnt-machine", type=str, default=platform.node(),
                        help="Machine name for LNT submission")
    parser.add_argument("--lnt-run-info", action='append', default=[],
                        type=lambda kv: kv.split("="),
                        help="Extra key=value pairs for LNT run-info")
    parser.add_argument("--lnt-machine-info", action='append', default=[],
                        type=lambda kv: kv.split("="),
                        help="Extra key=value pairs for LNT machine-info")
    parser.add_argument("--lnt-order", type=str,
                        default=str(int(time.time())),
                        help="Order for LNT submission")
    parser.add_argument("--lnt-tag", type=str, default="swift-compile",
                        help="Tag for LNT submission")
    parser.add_argument("--lnt-submit", type=str, default=None,
                        help="URL to submit LNT data to (rather than print)")
    # --- Stat selection / grouping ---
    parser.add_argument("--select-module",
                        default=[],
                        action="append",
                        help="Select specific modules")
    parser.add_argument("--group-by-module",
                        default=False,
                        action="store_true",
                        help="Group stats by module")
    parser.add_argument("--select-stat",
                        default=[],
                        action="append",
                        help="Select specific statistics")
    parser.add_argument("--select-stats-from-csv-baseline",
                        type=argparse.FileType('rb', 0), default=None,
                        help="Select statistics present in a CSV baseline")
    parser.add_argument("--exclude-timers",
                        default=False,
                        action="store_true",
                        help="only select counters, exclude timers")
    # --- Sorting and merging ---
    parser.add_argument("--sort-by-delta-pct",
                        default=False,
                        action="store_true",
                        help="Sort comparison results by delta-%%, not stat")
    parser.add_argument("--sort-descending",
                        default=False,
                        action="store_true",
                        help="Sort comparison results in descending order")
    parser.add_argument("--merge-by",
                        default="sum",
                        type=str,
                        help="Merge identical metrics by (sum|min|max)")
    parser.add_argument("--merge-timers",
                        default=False,
                        action="store_true",
                        help="Merge timers across modules/targets/etc.")
    parser.add_argument("--divide-by",
                        default=1,
                        metavar="D",
                        type=int,
                        help="Divide stats by D (to take an average)")
    # --- Markdown output tweaks ---
    parser.add_argument("--markdown",
                        default=False,
                        action="store_true",
                        help="Write output in markdown table format")
    parser.add_argument("--include-unchanged",
                        default=False,
                        action="store_true",
                        help="Include unchanged stats values in comparison")
    parser.add_argument("--close-regressions",
                        default=False,
                        action="store_true",
                        help="Close regression details in markdown")
    parser.add_argument("--github-emoji",
                        default=False,
                        action="store_true",
                        help="Add github-emoji indicators to markdown")
    # --- Mutually exclusive processing modes (exactly one required) ---
    modes = parser.add_mutually_exclusive_group(required=True)
    modes.add_argument("--catapult", action="store_true",
                       help="emit a 'catapult'-compatible trace of events")
    modes.add_argument("--incrementality", action="store_true",
                       help="summarize the 'incrementality' of a build")
    modes.add_argument("--set-csv-baseline", type=str, default=None,
                       help="Merge stats from a stats-dir into a CSV baseline")
    modes.add_argument("--compare-to-csv-baseline",
                       type=argparse.FileType('rb', 0), default=None,
                       metavar="BASELINE.csv",
                       help="Compare stats dir to named CSV baseline")
    modes.add_argument("--compare-stats-dirs",
                       action="store_true",
                       help="Compare two stats dirs directly")
    modes.add_argument("--lnt", action="store_true",
                       help="Emit an LNT-compatible test summary")
    modes.add_argument("--evaluate", type=str, default=None,
                       help="evaluate an expression of stat-names")
    modes.add_argument("--evaluate-delta", type=str, default=None,
                       help="evaluate an expression of stat-deltas")
    parser.add_argument('remainder', nargs=argparse.REMAINDER,
                        help="stats-dirs to process")

    args = parser.parse_args()
    if len(args.remainder) == 0:
        parser.print_help()
        return 1
    # Dispatch on whichever mode flag was given; comparison/evaluation
    # modes return an exit status, the others fall through to None.
    if args.catapult:
        write_catapult_trace(args)
    elif args.compare_stats_dirs:
        return compare_stats_dirs(args)
    elif args.set_csv_baseline is not None:
        return set_csv_baseline(args)
    elif args.compare_to_csv_baseline is not None:
        return compare_to_csv_baseline(args)
    elif args.incrementality:
        if args.paired:
            show_paired_incrementality(args)
        else:
            show_incrementality(args)
    elif args.lnt:
        write_lnt_values(args)
    elif args.evaluate:
        return evaluate(args)
    elif args.evaluate_delta:
        return evaluate_delta(args)
    return None


sys.exit(main())
| apache-2.0 |
def-/commandergenius | project/jni/python/src/Tools/scripts/linktree.py | 101 | 2425 | #! /usr/bin/env python
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
def main():
    """Replicate oldtree as newtree, with symlinks (routed through one
    special top-level link) in place of files. Returns an exit status."""
    if not 3 <= len(sys.argv) <= 4:
        print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
        return 2
    oldtree, newtree = sys.argv[1], sys.argv[2]
    if len(sys.argv) > 3:
        # Caller supplied a link target explicitly; creating it may fail
        # (e.g. it already exists) without aborting the run.
        link = sys.argv[3]
        link_may_fail = 1
    else:
        link = LINK
        link_may_fail = 0
    if not os.path.isdir(oldtree):
        print oldtree + ': not a directory'
        return 1
    try:
        os.mkdir(newtree, 0777)
    except os.error, msg:
        print newtree + ': cannot mkdir:', msg
        return 1
    linkname = os.path.join(newtree, link)
    try:
        # Single top-level symlink that every per-file link routes
        # through, so the copy can be re-targeted by replacing it.
        os.symlink(os.path.join(os.pardir, oldtree), linkname)
    except os.error, msg:
        if not link_may_fail:
            print linkname + ': cannot symlink:', msg
            return 1
        else:
            print linkname + ': warning: cannot symlink:', msg
    linknames(oldtree, newtree, link)
    return 0
def linknames(old, new, link):
if debug: print 'linknames', (old, new, link)
try:
names = os.listdir(old)
except os.error, msg:
print old + ': warning: cannot listdir:', msg
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print oldname, newname, linkname
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0777)
ok = 1
except:
print newname + \
': warning: cannot mkdir:', msg
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
    sys.exit(main())
| lgpl-2.1 |
JunctionTV/Getting-Access-Tokens | python/python_access_token.py | 1 | 2467 | """
To get the authentication token for Junction TV API calls
This code has two functions:
- ``getAuthToken(client_id, client_secret)``: to get the authentication token from `http://cloud.junctiontv.net/ums/2.0/oauth/`
- ``getFeeds(token)``: to get the Feeds according to various Junction TV APIs
For detailed description read: http://api.junctiontv.com/jtv/jtapi/getting-access-tokens-2
Refer Github: https://github.com/JunctionTV/Getting-Access-Tokens
"""
# Author: Gourab Chowdhury <gourab@junctiotntv.com>
#
# License: The MIT License (MIT)
# Copyright (c) <2016> <Junction TV Inc.>
import httplib, urllib, base64, json, sys
# get the oauth 2.0 token
def getAuthToken(client_id, client_secret):
    """Fetch an OAuth 2.0 access token from the Junction TV UMS service
    using HTTP Basic credentials; raises on any non-200 response."""
    conn = httplib.HTTPSConnection("cloud.junctiontv.net")
    url = "/ums/2.0/oauth/"
    # Basic auth: base64("client_id:client_secret") with newlines removed.
    credentials = '%s:%s' % (client_id, client_secret)
    authString = base64.encodestring(credentials).replace('\n', '')
    headersMap = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Authorization": "Basic " + authString
    }
    conn.request("POST", url, headers=headersMap)
    response = conn.getresponse()
    if response.status != 200:
        raise Exception('[API_CALL_ERROR]' + "{status code: " + str(response.status) + ",reason: "+ response.reason+" }")
    result = json.loads(response.read())
    return result["access_token"]
# call jtv API
def getFeeds(token):
    """Call a Junction TV API endpoint with a bearer token and return
    the decoded JSON body; raises on any non-200 response."""
    conn = httplib.HTTPConnection("www.examplejtvapi.com")
    url = "/xyz/abc/def"
    headersMap = {
        "Authorization": "Bearer " + token
    }
    #The method will vary according to specific API.
    conn.request("GET", url, headers=headersMap)
    response = conn.getresponse()
    if response.status != 200:
        raise Exception('[API_CALL_ERROR]' + "{status code: " + str(response.status) + ",reason: "+ response.reason+" }")
    return json.loads(response.read())
def main():
client_id = "XXXX"
client_secret = "XXXXXXXXX....XXXXXX"
token=getAuthToken(client_id, client_secret)
print "Authentication Token: ",token
try:
results = getFeeds(token)
except e:
# handle an auth error by re-fetching a auth token again
token = getAuthToken(client_id, client_secret)
results = getFeeds(token)
# print the results
print results
if __name__ == "__main__":
main()
| mit |
pmav99/FloodMapsWorkshop | python/srtm2_dem.py | 3 | 16004 | #!/usr/bin/env python
#
# Created on 7/5/2013 Pat Cappelaere - Vightel Corporation
#
# Requirements:
# gdal...
#
# DEM Manipulation and Visualization utilities for HydroSHEDS
#
# Inpiration from:
# http://blog.thematicmapping.org/2012/07/terrain-mapping-with-mapnik.html
#
#
import os, inspect
import argparse
import subprocess
import sys, urllib, httplib
from datetime import datetime
import math
import numpy, scipy
import scipy.signal
from osgeo import gdal, gdal_array
from osgeo import osr
from osgeo import ogr
from which import *
import mapnik
from mapnik import DatasourceCache as c;
import config
class DEM:
	def __init__( self, tile, zone, force, bbox, target, infile ):
		"""Configure source/output paths, either for a HydroSHEDS tile
		(zone+tile given) or for a bbox/target area cut from the
		area-wide SRTM2 VRT (bbox+target given)."""
		self.hydroSHEDS_dir = config.SRTM2_DIR
		self.HANDS_DIR = config.SRTM2_DIR
		self.tile = tile
		self.zone = zone
		# When true, outputs are regenerated even if they already exist.
		self.force = force
		self.bbox = bbox
		self.target = target
		if zone and tile:
			# Tile mode: DEM lives under <SRTM2>/<zone>/<tile>/<tile>_dem_bil.
			self.dem_dir = os.path.join(self.hydroSHEDS_dir, zone, tile, tile+"_dem_bil")
			# Default raster dimensions for a HydroSHEDS tile; color_relief()
			# overwrites these from the actual dataset.
			self.RasterXSize = 5855
			self.RasterYSize = 6142
		if bbox and target:
			# Area mode: work from the area-wide DEM VRT; tile name becomes 'na'.
			self.dem_vrt = os.path.join(self.HANDS_DIR, config.HANDS_AREA+ "_dem.vrt")
			self.dem_dir = target
			self.tile = "na"
			self.infile = os.path.join(target, infile)
		print "DEM DIR", self.dem_dir
	def hillshade(self):
		"""Generate <tile>_dem_hillshade_4326.tif via `gdaldem hillshade`
		(skipped when the output exists, unless self.force)."""
		src = os.path.join(self.dem_dir, self.tile + "_dem_4326.tif")
		dest = os.path.join(self.dem_dir, self.tile + "_dem_hillshade_4326.tif")
		if not os.path.isfile(dest) or self.force:
			cmd = "gdaldem hillshade -co compress=lzw " + src + " " + dest
			print cmd
			err = os.system(cmd)
			if err != 0:
				print('ERROR: hillshade file could not be generated:', err)
				sys.exit(-1)
	def color_relief(self):
		"""Build a color-relief image of the DEM with `gdaldem
		color-relief`, deriving the color ramp from the DEM's own
		min/mean/max statistics. Also records the raster dimensions."""
		print "Color Relief..."
		color_txt_file = "./dem_color_relief.txt"
		src = os.path.join(self.dem_dir, self.tile + "_dem_4326.tif")
		dest = os.path.join(self.dem_dir, self.tile + "_dem_color_relief_4326.tif")
		legend = os.path.join(self.dem_dir, "na_dem_legend.png")
		ds = gdal.Open( src )
		if ds is None:
			print('ERROR: file no data:', src)
			sys.exit(-1)
		band = ds.GetRasterBand(1)
		# Remember the true raster size for later mapnik rendering.
		self.RasterXSize = ds.RasterXSize
		self.RasterYSize = ds.RasterYSize
		(minv,maxv,mean, stddev) = band.GetStatistics(1,1)
		print '*** Min=%.3f, Max=%.3f Mean=%.3f Stdev=%.3f' % (minv,maxv,mean,stddev)
		# generate the color_relief.txt file
		# Five ramp stops: min, halfway to mean, mean, halfway to max, max.
		mid_lower = (minv+mean)/2
		mid_higher = (maxv+mean)/2
		str = "%.2f 110 220 110\n" % (minv)
		str += "%.2f 240 250 160\n" % (mid_lower)
		str += "%.2f 230 220 170\n" % (mean)
		str += "%.2f 220 220 220\n" % (mid_higher)
		str += "%.2f 250 250 250\n" % (maxv)
		if not os.path.isfile(color_txt_file) or self.force:
			f = open(color_txt_file, 'w')
			f.write(str)
			f.close()
		# Release GDAL handles before shelling out.
		ds = None
		band = None
		if not os.path.isfile(dest) or self.force:
			cmd ="gdaldem color-relief " + src + " " + color_txt_file + " " + dest
			err = os.system(cmd)
			if err != 0:
				print('ERROR: color_relief file could not be generated:', err)
				sys.exit(-1)
		#if not os.path.isfile(legend) or self.force:
		#	if self.tile == "na":
		#		cmd ="hand_legend.py -l %.0f %.0f %.0f %0.f %0.f --dir %s " % (min, mid_lower, mean, mid_higher, max, self.target)
		#	else:
		#		cmd ="hand_legend.py -l %.0f %.0f %.0f %0.f %0.f -t %s -z %s" % (min, mid_lower, mean, mid_higher, max, self.tile, self.zone)
		#	print cmd
		#	err = os.system(cmd)
		#	if err != 0:
		#		print('ERROR: legend file could not be generated:', err)
		#		sys.exit(-1)
	def slopeshade(self):
		"""Compute a slope raster from the DEM, then render it as a
		grayscale slope-shade (white=flat, black=90 degrees) using
		`gdaldem slope` and `gdaldem color-relief`."""
		slope_txt_file = "./color_slope.txt"
		src = os.path.join(self.dem_dir, self.tile + "_dem_4326.tif")
		slope = os.path.join(self.dem_dir, self.tile + "_dem_slope_4326.tif")
		slopeshade = os.path.join(self.dem_dir, self.tile + "_dem_slopeshade_4326.tif")
		if not os.path.isfile(slope) or self.force:
			cmd = "gdaldem slope " + src + " " + slope
			print cmd
			err = os.system(cmd)
			if err != 0:
				print('ERROR: slope file could not be generated:', err)
				sys.exit(-1)
		if not os.path.isfile(slope_txt_file):
			print "Creating ", slope_txt_file
			# Linear ramp: 0 degrees -> white, 90 degrees -> black.
			str = "0 255 255 255\n"
			str += "90 0 0 0"
			f = open(slope_txt_file, 'w')
			f.write(str)
			f.close()
		if not os.path.isfile(slopeshade) or self.force:
			cmd ="gdaldem color-relief " + slope + " " + slope_txt_file + " " + slopeshade
			print cmd
			err = os.system(cmd)
			if err != 0:
				print('ERROR: slopeshade file could not be generated:', err)
				sys.exit(-1)
	def water_relief(self):
		"""Compose the final relief image (color-relief + slopeshade +
		hillshade + surface-water overlay) for this tile/area."""
		xml_file = os.path.join(self.dem_dir, "na_terrain.xml")
		slopeshade = os.path.join(self.dem_dir, self.tile + "_dem_slopeshade_4326.tif")
		hillshade = os.path.join(self.dem_dir, self.tile + "_dem_hillshade_4326.tif")
		color_relief = os.path.join(self.dem_dir, self.tile + "_dem_color_relief_4326.tif")
		relief = os.path.join(self.dem_dir, self.tile + "_dem_relief_4326.tif")
		flood = None
		print "water_relief", color_relief
		if( self.tile == 'na'):
			# Area mode: water mask was generated into the output dir.
			water = os.path.join(self.dem_dir, self.tile+"_water_image_transp.tif")
			#flood = os.path.join(self.dem_dir, "outputfile_4326_hand.tif")
		else:
			# Tile mode: water mask lives beside the HydroSHEDS tile data.
			water = os.path.join(self.hydroSHEDS_dir, self.zone, self.tile, self.tile+"_water_image_transp.tif")
		if not os.path.isfile(relief) or self.force:
			self.compose_relief( xml_file, slopeshade, hillshade, color_relief, relief, flood, water)
def hand_relief(self):
xml_file = os.path.join(self.dem_dir, "na_terrain.xml")
slopeshade = os.path.join(self.dem_dir, self.tile + "_dem_slopeshade_4326.tif")
hillshade = os.path.join(self.dem_dir, self.tile + "_dem_hillshade_4326.tif")
color_relief = os.path.join(self.dem_dir, self.tile + "_dem_color_relief_4326.tif")
relief = os.path.join(self.dem_dir, self.tile + "_dem_hand_relief_4326.tif")
flood = None
if self.tile == 'na':
water = os.path.join(self.dem_dir, "hand_4326_transp.tif")
flood = os.path.join(self.dem_dir, "outputfile_4326_hand.tif")
else:
water = os.path.join(self.HAND_DIR, self.zone, self.tile + "_hand_transp.tif")
if not os.path.isfile(relief) or self.force:
self.compose_relief( xml_file, slopeshade, hillshade, color_relief, relief, flood, water)
	def compose_relief( self, xml_file, slopeshade, hillshade, color_relief, relief, flood, water):
		"""Write a mapnik map definition compositing the color-relief,
		slopeshade, hillshade, optional flood layer and water layer,
		then render it to `relief` at the DEM's raster size."""
		# Mercator map wrapper; layer order below controls compositing.
		xml = "<Map srs='+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs'>\n"
		xml += "<Style name='color relief style'>\n"
		xml += "<Rule>\n"
		xml += "<RasterSymbolizer comp-op='src-over' />\n"
		xml += "</Rule>\n"
		xml += "</Style>\n"
		# Slopeshade and hillshade are multiplied over the base colors.
		xml += "<Style name='slopeshade style'>\n"
		xml += "<Rule>\n"
		xml += "<RasterSymbolizer opacity='0.4' comp-op='multiply' />\n"
		xml += "</Rule>\n"
		xml += "</Style>\n"
		xml += "<Style name='hillshade style'>\n"
		xml += "<Rule>\n"
		xml += "<RasterSymbolizer opacity='0.6' comp-op='multiply' />\n"
		xml += "</Rule>\n"
		xml += "</Style>\n"
		xml += "<Style name='flood water'>\n"
		xml += "<Rule>\n"
		xml += "<RasterSymbolizer opacity='1' />\n"
		xml += "</Rule>\n"
		xml += "</Style>\n"
		xml += "<Style name='water'>\n"
		xml += "<Rule>\n"
		xml += "<RasterSymbolizer opacity='1' comp-op='src-over'/>\n"
		xml += "</Rule>\n"
		xml += "</Style>\n"
		xml += "<Layer name='color relief'>\n"
		xml += "<StyleName>color relief style</StyleName>\n"
		xml += "<Datasource>\n"
		xml += "<Parameter name='type'>gdal</Parameter>\n"
		xml += "<Parameter name='file'>" + color_relief +"</Parameter>\n"
		xml += "<Parameter name='format'>tiff</Parameter>\n"
		xml += "</Datasource>\n"
		xml += "</Layer>\n"
		xml += "<Layer name='slopeshade'>\n"
		xml += "<StyleName>slopeshade style</StyleName>\n"
		xml += "<Datasource>\n"
		xml += "<Parameter name='type'>gdal</Parameter>\n"
		xml += "<Parameter name='file'>"+ slopeshade+"</Parameter>\n"
		xml += "<Parameter name='format'>tiff</Parameter>\n"
		xml += "</Datasource>\n"
		xml += "</Layer>\n"
		xml += "<Layer name='hillshade'>\n"
		xml += "<StyleName>hillshade style</StyleName>\n"
		xml += "<Datasource>\n"
		xml += "<Parameter name='type'>gdal</Parameter>\n"
		xml += "<Parameter name='file'>"+ hillshade+"</Parameter>\n"
		xml += "<Parameter name='format'>tiff</Parameter>\n"
		xml += "</Datasource>\n"
		xml += "</Layer>\n"
		# The flood layer is optional (only present in HAND composites).
		if flood:
			xml += "<Layer name='flood water'>\n"
			xml += "<StyleName>flood water</StyleName>\n"
			xml += "<Datasource>\n"
			xml += "<Parameter name='type'>gdal</Parameter>\n"
			xml += "<Parameter name='file'>"+ flood +"</Parameter>\n"
			xml += "<Parameter name='format'>tiff</Parameter>\n"
			xml += "</Datasource>\n"
			xml += "</Layer>\n"
		xml += "<Layer name='water'>\n"
		xml += "<StyleName>water</StyleName>\n"
		xml += "<Datasource>\n"
		xml += "<Parameter name='type'>gdal</Parameter>\n"
		xml += "<Parameter name='file'>"+ water +"</Parameter>\n"
		xml += "<Parameter name='format'>tiff</Parameter>\n"
		xml += "</Datasource>\n"
		xml += "</Layer>\n"
		xml += "</Map>\n"
		f = open(xml_file, 'w')
		f.write(xml)
		f.close()
		print "Generating relief file:", color_relief, xml_file
		print self.RasterXSize, self.RasterYSize
		# Render at the DEM's native raster size, zoomed to full extent.
		map = mapnik.Map(self.RasterXSize, self.RasterYSize)
		mapnik.load_map(map, xml_file)
		map.zoom_all()
		mapnik.render_to_file(map, relief)
def metersToLatLng(self,ds,X,Y):
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
srsLatLong = srs.CloneGeogCS()
ct = osr.CoordinateTransformation(srs,srsLatLong)
return ct.TransformPoint(X,Y)
# create matching osm water layer
	def create_osm_water_layer(self):
		"""Render an OSM surface-water image matching the DEM's extent
		and pixel dimensions, then make a transparent version of it."""
		dem_img = os.path.join(self.dem_dir, "na_dem_4326.tif")
		osm_surface_water_img = os.path.join(self.dem_dir, "na_water_image.tif")
		osm_surface_water_img_tif = os.path.join(self.dem_dir, "na_water_image.tif")
		osm_surface_water_transp_img = os.path.join(self.dem_dir, "na_water_image_transp.tif")
		ds = gdal.Open( dem_img )
		# Derive the DEM's projected corner coordinates from its geotransform.
		geotransform = ds.GetGeoTransform()
		dx = geotransform[1] * ds.RasterXSize
		dy = geotransform[5] * ds.RasterYSize
		X1 = geotransform[0]
		Y1 = geotransform[3] + dy
		X2 = geotransform[0] + dx
		Y2 = geotransform[3]
		#print "meters", X1,Y1,X2,Y2
		# Lower-left and upper-right corners in lon/lat for generate_image.py.
		LLC = self.metersToLatLng(ds,X1,Y1)
		URC = self.metersToLatLng(ds,X2,Y2)
		if not os.path.isfile(osm_surface_water_img) or self.force:
			cmd = "python generate_image.py --mapfile %(map)s --name %(fname)s --bbox %(X1)f %(Y1)f %(X2)f %(Y2)f --img %(dx)d %(dy)d" % \
				{ 'map': 'water_4326.xml', 'fname':osm_surface_water_img,
				'X1':LLC[0], 'Y1':LLC[1], 'X2':URC[0], 'Y2':URC[1],
				'dx': ds.RasterXSize, 'dy': ds.RasterYSize
				}
			print(cmd)
			err = os.system(cmd)
			if err != 0:
				print('ERROR: water file could not be generated:', err)
				sys.exit(-1)
		self.transp_water_layer(dem_img, osm_surface_water_img_tif, osm_surface_water_transp_img)
		#print "Water done"
		ds = None
def water_layer(self):
    """Build the transparent water overlay for the current HydroSHEDS tile."""
    tile_dir = os.path.join(self.hydroSHEDS_dir, self.zone, self.tile)
    water_img = os.path.join(tile_dir, self.tile + "_water_image.tif")
    transparent_img = os.path.join(tile_dir, self.tile + "_water_image_transp.tif")
    dem_img = os.path.join(self.dem_dir, self.tile + "_dem_4326.tif")
    self.transp_water_layer(dem_img, water_img, transparent_img)
def hand_layer(self):
    """Build the transparent HAND overlay (continent-wide for 'na', else per tile)."""
    if self.tile == 'na':
        hand_img = os.path.join(self.dem_dir, "hand_4326.tif")
        hand_transp_img = os.path.join(self.dem_dir, "hand_4326_transp.tif")
    else:
        zone_dir = os.path.join(self.HAND_DIR, self.zone)
        hand_img = os.path.join(zone_dir, self.tile + "_hand.tif")
        hand_transp_img = os.path.join(zone_dir, self.tile + "_hand_transp.tif")
    dem_img = os.path.join(self.dem_dir, self.tile + "_dem_4326.tif")
    self.transp_water_layer(dem_img, hand_img, hand_transp_img)
# Convert to transparent and copy projection
# Warning: this will fail if convert is not built with tiff delegate
# PNG Driver will be used to create target tif file and gdalcopyproj will fail
def transp_water_layer(self, src, water, water_transp):
    """Make black pixels of *water* transparent, restoring georeferencing.

    ImageMagick strips GeoTIFF metadata, so gdalcopyproj.py copies the
    projection back from *src* afterwards.  Exits the process on failure.
    Skipped entirely when the output already exists and --force is unset.
    """
    if self.force or not os.path.isfile(water_transp):
        cmd = "convert -transparent black " + water + " " + water_transp
        # Parenthesized print for consistency with the rest of the file
        # (identical output on Python 2 for a single argument).
        print(cmd)
        err = os.system(cmd)
        if err != 0:
            print('ERROR: water transparency file could not be generated:', err)
            sys.exit(-1)
        cmd = "gdalcopyproj.py "+ src + " " + water_transp
        print(cmd)
        err = os.system(cmd)
        if err != 0:
            print('ERROR: gdalcopyproj generated error:', err)
            sys.exit(-1)
#
# Subset DEM to bbox area
#
def subset(self):
    """Clip the DEM VRT to the footprint of the base image via subset.py.

    Skipped when the output already exists and --force is unset.  Now
    checks the subprocess exit status and aborts on failure, matching
    the error handling of the other shell-out methods in this class.
    """
    base_img = self.infile #os.path.join(self.dem_dir, "outputfile_4326.tif")
    in_img = self.dem_vrt
    out_img = os.path.join(self.dem_dir, "na_dem_4326.tif")
    if self.force or not os.path.isfile(out_img):
        cmd = "subset.py "+ base_img + " " + in_img + " " + out_img
        print(cmd)
        err = os.system(cmd)
        if err != 0:
            print('ERROR: subset file could not be generated:', err)
            sys.exit(-1)
def water_from_dem(self):
    """De-speckle the DEM slope raster and save it as an 8-bit image.

    Applies a 3x3 median filter to <tile>_dem_slope_4326.tif and writes
    the result to <tile>_dem_slope_4326_2.tif.  Presumably this feeds a
    slope-based water detection step -- confirm with callers.
    """
    slope = os.path.join(self.dem_dir, self.tile + "_dem_slope_4326.tif")
    slope2 = os.path.join(self.dem_dir, self.tile + "_dem_slope_4326_2.tif")
    data = gdal_array.LoadFile(slope)
    # De-speckle
    data = scipy.signal.medfilt2d(data, kernel_size=3)
    # prototype= copies the georeferencing/projection from the source file.
    gdal_array.SaveArray(data.astype(numpy.int8), slope2, prototype=gdal.Open(slope))
    print "saved water from dem", slope2
#
# Main
#
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of GetGeoTransform(can_return_null)
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
sys.exit(1)
# make sure we have ImageMagick convert
err = which("convert")
if err == None:
print "convert missing"
sys.exit(-1)
# make sure that mapnik as the gdal plugin
if not 'gdal' in c.plugin_names():
print "Missing 'gdal' input plugin in mapnik"
sys.exit(-1)
# make sure tiff delegate is present
output = subprocess.check_output("convert --version | grep Delegates", shell=True)
err = output.find('tiff')
if err==-1:
print "ImageMagick Convert does not have a tiff delegate... rebuild it!", output
sys.exit(-1)
parser = argparse.ArgumentParser(description='Generate DEM')
apg_input = parser.add_argument_group('Input')
apg_input.add_argument("-f", "--force", action='store_true', help="HydroSHEDS forces new water image to be generated")
apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose")
apg_input.add_argument("-d", "--scene", nargs=1, help="Scene")
options = parser.parse_args()
force = options.force
verbose = options.verbose
scene = options.scene[0]
if scene[0:3] == "EO1":
target_dir = os.path.join(config.EO1_DIR,scene+"_1T")
fileName = os.path.join(target_dir, "COMPOSITE_543_dn_4326.tif")
print fileName
ds = gdal.Open( fileName )
if ds is None:
print('ERROR: file no data:')
sys.exit(-1)
geotransform = ds.GetGeoTransform()
projection = ds.GetProjection()
minX = geotransform[0]
maxY = geotransform[3]
pres = geotransform[1]
maxX = minX + ds.RasterXSize*pres
minY = maxY - ds.RasterXSize*pres
bbox = [minX, minY, maxX, maxY]
app = DEM( None, None, force, bbox, target_dir, "COMPOSITE_543_dn_4326.tif")
#app.subset()
#app.hillshade()
#app.color_relief()
#app.slopeshade()
app.water_from_dem()
#if tile and zone:
# print str(datetime.now()), "Starting processing of tile:"+tile[0]+" zone:"+zone[0]
# app = DEM( tile[0], zone[0], force, None, None, None)
# app.hillshade()
# app.color_relief()
# app.slopeshade()
# app.water_layer()
# app.water_relief()
## app.hand_layer()
## app.hand_relief()
#if bbox and target_dir and infile:
# print str(datetime.now()), "Starting DEM processing for:", bbox, target_dir[0], infile[0]
# app = DEM( None, None, force, bbox, target_dir[0], infile[0])
# app.subset()
# app.hillshade()
# app.color_relief()
# app.slopeshade()
# app.create_osm_water_layer()
# app.water_relief()
#app.hand_layer()
#app.hand_relief()
print str(datetime.now()), "Done." | apache-2.0 |
pknight007/electrum-vtc | plugins/cosigner_pool/vtc.py | 4 | 7580 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import threading
import time
import xmlrpclib
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_vtc import bitcoin, util
from electrum_vtc import transaction
from electrum_vtc.plugins import BasePlugin, hook
from electrum_vtc.i18n import _
from electrum_vtc.wallet import Multisig_Wallet
from electrum_vtc_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
# Well-known cosigner pool relay used to exchange partially signed transactions.
PORT = 12344
HOST = 'cosigner.electrum.org'
# NOTE(review): plain HTTP -- payloads are encrypted with the cosigner's key
# before upload (see Plugin.do_send), but the transport itself is unprotected.
server = xmlrpclib.ServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True)
class Listener(util.DaemonThread):
    """Background thread that polls the cosigner pool server.

    Every 30 seconds it asks the server for a message addressed to each of
    our keyhashes; any new payload is forwarded to the GUI thread through
    the parent plugin's Qt signal ("cosigner:receive").
    """

    def __init__(self, parent):
        util.DaemonThread.__init__(self)
        self.daemon = True
        self.parent = parent          # owning Plugin instance
        self.received = set()         # keyhashes already fetched, awaiting clear()
        self.keyhashes = []           # mailboxes to poll

    def set_keyhashes(self, keyhashes):
        self.keyhashes = keyhashes

    def clear(self, keyhash):
        # Delete the message server-side and allow the mailbox to be polled again.
        server.delete(keyhash)
        self.received.remove(keyhash)

    def run(self):
        while self.running:
            if not self.keyhashes:
                # Nothing to watch yet; re-check shortly.
                time.sleep(2)
                continue
            for keyhash in self.keyhashes:
                if keyhash in self.received:
                    continue
                try:
                    message = server.get(keyhash)
                except Exception as e:
                    # Network/server failure: back off and retry the loop.
                    self.print_error("cannot contact cosigner pool")
                    time.sleep(30)
                    continue
                if message:
                    self.received.add(keyhash)
                    self.print_error("received message for", keyhash)
                    # Hop to the GUI thread via the Qt signal.
                    self.parent.obj.emit(SIGNAL("cosigner:receive"), keyhash,
                                         message)
            # poll every 30 seconds
            time.sleep(30)
class Plugin(BasePlugin):
    """Cosigner-pool plugin: exchange partially signed multisig transactions
    through a shared XML-RPC relay, encrypting each payload to the intended
    cosigner's public key."""

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.listener = None
        # Qt signal proxy: the Listener thread emits "cosigner:receive" on
        # this object so that on_receive runs on the GUI thread.
        self.obj = QObject()
        self.obj.connect(self.obj, SIGNAL('cosigner:receive'), self.on_receive)
        # (key, keyhash, window) for keystores we can sign with.
        self.keys = []
        # (window, xpub, pubkey_hex, keyhash) for the other cosigners.
        self.cosigner_list = []

    @hook
    def init_qt(self, gui):
        # The plugin may be enabled after windows already exist.
        for window in gui.windows:
            self.on_new_window(window)

    @hook
    def on_new_window(self, window):
        self.update(window)

    @hook
    def on_close_window(self, window):
        self.update(window)

    def is_available(self):
        return True

    def update(self, window):
        """Rebuild cosigner bookkeeping for *window*'s (multisig) wallet."""
        wallet = window.wallet
        if type(wallet) != Multisig_Wallet:
            return
        if self.listener is None:
            self.print_error("starting listener")
            self.listener = Listener(self)
            self.listener.start()
        elif self.listener:
            # NOTE(review): with a listener running this branch shuts it down,
            # so successive update() calls toggle the listener on and off --
            # confirm this matches the intended open/close lifecycle.
            self.print_error("shutting down listener")
            self.listener.stop()
            self.listener = None
        self.keys = []
        self.cosigner_list = []
        for key, keystore in wallet.keystores.items():
            xpub = keystore.get_master_public_key()
            # K: raw public key bytes (hex) from the xpub; its hash is the
            # cosigner's "mailbox" identifier on the pool server.
            K = bitcoin.deserialize_xpub(xpub)[-1].encode('hex')
            _hash = bitcoin.Hash(K).encode('hex')
            if not keystore.is_watching_only():
                self.keys.append((key, _hash, window))
            else:
                self.cosigner_list.append((window, xpub, K, _hash))
        if self.listener:
            self.listener.set_keyhashes([t[1] for t in self.keys])

    @hook
    def transaction_dialog(self, d):
        # Add a "Send to cosigner" button to the transaction dialog.
        d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
        b.clicked.connect(lambda: self.do_send(d.tx))
        d.buttons.insert(0, b)
        self.transaction_dialog_update(d)

    @hook
    def transaction_dialog_update(self, d):
        # Show the button only for incomplete txs that some cosigner can sign.
        if d.tx.is_complete() or d.wallet.can_sign(d.tx):
            d.cosigner_send_button.hide()
            return
        for window, xpub, K, _hash in self.cosigner_list:
            if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
                d.cosigner_send_button.show()
                break
        else:
            d.cosigner_send_button.hide()

    def cosigner_can_sign(self, tx, cosigner_xpub):
        """Return True if *cosigner_xpub* appears among the xpubs of tx's inputs."""
        from electrum_vtc.keystore import is_xpubkey, parse_xpubkey
        xpub_set = set([])
        for txin in tx.inputs():
            for x_pubkey in txin['x_pubkeys']:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_set.add(xpub)
        return cosigner_xpub in xpub_set

    def do_send(self, tx):
        """Encrypt the raw tx to each relevant cosigner's key and upload it."""
        for window, xpub, K, _hash in self.cosigner_list:
            if not self.cosigner_can_sign(tx, xpub):
                continue
            # Encrypt to the cosigner's public key so the relay never sees
            # the plaintext transaction.
            message = bitcoin.encrypt_message(tx.raw, K)
            try:
                server.put(_hash, message)
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                window.show_message("Failed to send transaction to cosigning pool.")
                return
            window.show_message("Your transaction was sent to the cosigning pool.\nOpen your cosigner wallet to retrieve it.")

    def on_receive(self, keyhash, message):
        """GUI-thread handler for an encrypted tx fetched by the Listener."""
        self.print_error("signal arrived for", keyhash)
        for key, _hash, window in self.keys:
            if _hash == keyhash:
                break
        else:
            self.print_error("keyhash not found")
            return
        wallet = window.wallet
        if wallet.has_password():
            password = window.password_dialog('An encrypted transaction was retrieved from cosigning pool.\nPlease enter your password to decrypt it.')
            if not password:
                return
        else:
            password = None
            if not window.question(_("An encrypted transaction was retrieved from cosigning pool.\nDo you want to open it now?")):
                return
        xprv = wallet.keystore.get_master_private_key(password)
        if not xprv:
            return
        try:
            # Derive the raw private key from the xprv and decrypt the payload.
            k = bitcoin.deserialize_xprv(xprv)[-1].encode('hex')
            EC = bitcoin.EC_KEY(k.decode('hex'))
            message = EC.decrypt_message(message)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            window.show_message(str(e))
            return
        # Acknowledge receipt so the mailbox can be reused.
        self.listener.clear(keyhash)
        tx = transaction.Transaction(message)
        show_transaction(tx, window, prompt_if_unsaved=True)
| mit |
crcresearch/osf.io | tests/test_spam_mixin.py | 8 | 4828 | from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.utils import timezone
from nose.tools import * # noqa PEP8 asserts
from framework.auth import Auth
from tests.base import OsfTestCase
from osf_tests.factories import UserFactory, CommentFactory
from osf.models import SpamStatus
class TestSpamMixin(OsfTestCase):
    """Exercise the SpamMixin report/retract/flag state machine on comments."""

    def setUp(self):
        super(TestSpamMixin, self).setUp()
        self.comment = CommentFactory()
        self.auth = Auth(user=self.comment.user)

    def test_report_abuse(self):
        user = UserFactory()
        time = timezone.now()
        self.comment.report_abuse(
            user, date=time, category='spam', text='ads', save=True)
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
        # The stored report round-trips all fields and starts unretracted.
        equivalent = dict(
            date=time,
            category='spam',
            text='ads',
            retracted=False
        )
        assert_in(user._id, self.comment.reports)
        assert_equal(self.comment.reports[user._id], equivalent)

    def test_report_abuse_own_comment(self):
        # Users may not report their own comments.
        with assert_raises(ValueError):
            self.comment.report_abuse(
                self.comment.user,
                category='spam', text='ads',
                save=True
            )
        assert_equal(self.comment.spam_status, SpamStatus.UNKNOWN)

    def test_retract_report(self):
        user = UserFactory()
        time = timezone.now()
        self.comment.report_abuse(
            user, date=time, category='spam', text='ads', save=True
        )
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
        self.comment.retract_report(user, save=True)
        # Retracting the only report resets the status...
        assert_equal(self.comment.spam_status, SpamStatus.UNKNOWN)
        # ...but the report itself is kept, marked retracted.
        equivalent = {
            'date': time,
            'category': 'spam',
            'text': 'ads',
            'retracted': True
        }
        assert_in(user._id, self.comment.reports)
        assert_equal(self.comment.reports[user._id], equivalent)

    def test_retract_report_not_reporter(self):
        reporter = UserFactory()
        non_reporter = UserFactory()
        self.comment.report_abuse(
            reporter, category='spam', text='ads', save=True
        )
        with assert_raises(ValueError):
            self.comment.retract_report(non_reporter, save=True)
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)

    def test_retract_one_report_of_many(self):
        user_1 = UserFactory()
        user_2 = UserFactory()
        time = timezone.now()
        self.comment.report_abuse(
            user_1, date=time, category='spam', text='ads', save=True
        )
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
        self.comment.report_abuse(
            user_2, date=time, category='spam', text='all', save=True
        )
        self.comment.retract_report(user_1, save=True)
        equivalent = {
            'date': time,
            'category': 'spam',
            'text': 'ads',
            'retracted': True
        }
        assert_in(user_1._id, self.comment.reports)
        assert_equal(self.comment.reports[user_1._id], equivalent)
        # Still flagged: user_2's report remains active.
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)

    def test_flag_spam(self):
        self.comment.flag_spam()
        self.comment.save()
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)

    def test_cannot_remove_flag_not_retracted(self):
        user = UserFactory()
        self.comment.report_abuse(
            user, category='spam', text='ads', save=True
        )
        self.comment.remove_flag(save=True)
        # An unretracted abuse report keeps the comment flagged.
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)

    def test_remove_flag(self):
        self.comment.flag_spam()
        self.comment.save()
        assert_equal(self.comment.spam_status, SpamStatus.FLAGGED)
        self.comment.remove_flag(save=True)
        assert_equal(self.comment.spam_status, SpamStatus.UNKNOWN)

    def test_confirm_ham(self):
        self.comment.confirm_ham(save=True)
        assert_equal(self.comment.spam_status, SpamStatus.HAM)

    def test_confirm_spam(self):
        self.comment.confirm_spam(save=True)
        assert_equal(self.comment.spam_status, SpamStatus.SPAM)

    def test_validate_reports_bad_key(self):
        # Report keys must be valid user ids.
        self.comment.reports[None] = {'category': 'spam', 'text': 'ads'}
        with assert_raises(ValidationError):
            self.comment.save()

    def test_validate_reports_bad_type(self):
        # Report values must be dicts.
        self.comment.reports[self.comment.user._id] = 'not a dict'
        with assert_raises(ValidationError):
            self.comment.save()

    def test_validate_reports_bad_value(self):
        # Report dicts must carry the expected fields.
        self.comment.reports[self.comment.user._id] = {'foo': 'bar'}
        with assert_raises(ValidationError):
            self.comment.save()
| apache-2.0 |
webmedic/booker | src/gdata/tlslite/utils/Python_RSAKey.py | 47 | 7661 | """Pure-Python RSA implementation."""
from .cryptomath import *
from . import xmltools
from .ASN1Parser import ASN1Parser
from .RSAKey import *
class Python_RSAKey(RSAKey):
    """RSA key implemented in pure Python.

    Private-key operations use the CRT for speed and multiplicative
    blinding as a timing-attack countermeasure.  Keys can be serialized
    to / parsed from PKCS#8 or SSLeay PEM and the trevp.net XML format.
    """

    def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
        # A modulus without a public exponent (or vice versa) is malformed.
        if (n and not e) or (e and not n):
            raise AssertionError()
        self.n = n
        self.e = e
        self.d = d
        self.p = p
        self.q = q
        self.dP = dP
        self.dQ = dQ
        self.qInv = qInv
        # Blinding state, created lazily on the first private-key operation.
        self.blinder = 0
        self.unblinder = 0

    def hasPrivateKey(self):
        """Return True if the private exponent is present."""
        return self.d != 0

    def hash(self):
        """Return a base64 hash of the XML-serialized public key."""
        s = self.writeXMLPublicKey('\t\t')
        return hashAndBase64(s.strip())

    def _rawPrivateKeyOp(self, m):
        #Create blinding values, on the first pass:
        if not self.blinder:
            self.unblinder = getRandomNumber(2, self.n)
            self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
                                  self.n)

        #Blind the input
        m = (m * self.blinder) % self.n

        #Perform the RSA operation
        c = self._rawPrivateKeyOpHelper(m)

        #Unblind the output
        c = (c * self.unblinder) % self.n

        #Update blinding values (squaring keeps the pair consistent)
        self.blinder = (self.blinder * self.blinder) % self.n
        self.unblinder = (self.unblinder * self.unblinder) % self.n

        #Return the output
        return c

    def _rawPrivateKeyOpHelper(self, m):
        #Non-CRT version
        #c = powMod(m, self.d, self.n)

        #CRT version (~3x faster)
        s1 = powMod(m, self.dP, self.p)
        s2 = powMod(m, self.dQ, self.q)
        h = ((s1 - s2) * self.qInv) % self.p
        c = s2 + self.q * h
        return c

    def _rawPublicKeyOp(self, c):
        m = powMod(c, self.e, self.n)
        return m

    def acceptsPassword(self):
        """Pure-Python keys are stored unencrypted; no password support."""
        return False

    def write(self, indent=''):
        """Serialize to the trevp.net XML format (private form if available)."""
        if self.d:
            s = indent+'<privateKey xmlns="http://trevp.net/rsa">\n'
        else:
            s = indent+'<publicKey xmlns="http://trevp.net/rsa">\n'
        s += indent+'\t<n>%s</n>\n' % numberToBase64(self.n)
        s += indent+'\t<e>%s</e>\n' % numberToBase64(self.e)
        if self.d:
            s += indent+'\t<d>%s</d>\n' % numberToBase64(self.d)
            s += indent+'\t<p>%s</p>\n' % numberToBase64(self.p)
            s += indent+'\t<q>%s</q>\n' % numberToBase64(self.q)
            s += indent+'\t<dP>%s</dP>\n' % numberToBase64(self.dP)
            s += indent+'\t<dQ>%s</dQ>\n' % numberToBase64(self.dQ)
            s += indent+'\t<qInv>%s</qInv>\n' % numberToBase64(self.qInv)
            s += indent+'</privateKey>'
        else:
            s += indent+'</publicKey>'
        #Only add \n if part of a larger structure
        if indent != '':
            s += '\n'
        return s

    def writeXMLPublicKey(self, indent=''):
        """Serialize only the public part of the key."""
        return Python_RSAKey(self.n, self.e).write(indent)

    def generate(bits):
        """Generate a new RSA key pair with a modulus of roughly *bits* bits."""
        key = Python_RSAKey()
        # Floor division is required: "bits/2" yields a float under Python 3
        # true division (this module already uses Python 3 relative imports),
        # which breaks the prime-generation arithmetic.
        p = getRandomPrime(bits//2, False)
        q = getRandomPrime(bits//2, False)
        t = lcm(p-1, q-1)
        key.n = p * q
        key.e = 3  #Needed to be long, for Java
        key.d = invMod(key.e, t)
        key.p = p
        key.q = q
        key.dP = key.d % (p-1)
        key.dQ = key.d % (q-1)
        key.qInv = invMod(q, p)
        return key
    generate = staticmethod(generate)

    def parsePEM(s, passwordCallback=None):
        """Parse a string containing a <privateKey> or <publicKey>, or
        PEM-encoded key."""
        start = s.find("-----BEGIN PRIVATE KEY-----")
        if start != -1:
            end = s.find("-----END PRIVATE KEY-----")
            if end == -1:
                raise SyntaxError("Missing PEM Postfix")
            # NOTE(review): the length string below has an extra space versus
            # the searched prefix, so one extra character (presumably the
            # trailing newline) is skipped -- confirm before changing.
            s = s[start+len("-----BEGIN PRIVATE KEY -----") : end]
            bytes = base64ToBytes(s)
            return Python_RSAKey._parsePKCS8(bytes)
        else:
            start = s.find("-----BEGIN RSA PRIVATE KEY-----")
            if start != -1:
                end = s.find("-----END RSA PRIVATE KEY-----")
                if end == -1:
                    raise SyntaxError("Missing PEM Postfix")
                s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end]
                bytes = base64ToBytes(s)
                return Python_RSAKey._parseSSLeay(bytes)
        raise SyntaxError("Missing PEM Prefix")
    parsePEM = staticmethod(parsePEM)

    def parseXML(s):
        """Parse a trevp.net XML key string."""
        element = xmltools.parseAndStripWhitespace(s)
        return Python_RSAKey._parseXML(element)
    parseXML = staticmethod(parseXML)

    def _parsePKCS8(bytes):
        p = ASN1Parser(bytes)

        version = p.getChild(0).value[0]
        if version != 0:
            raise SyntaxError("Unrecognized PKCS8 version")

        # DER-encoded OID for rsaEncryption (1.2.840.113549.1.1.1) + NULL params.
        rsaOID = p.getChild(1).value
        if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
            raise SyntaxError("Unrecognized AlgorithmIdentifier")

        #Get the privateKey
        privateKeyP = p.getChild(2)

        #Adjust for OCTET STRING encapsulation
        privateKeyP = ASN1Parser(privateKeyP.value)

        return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
    _parsePKCS8 = staticmethod(_parsePKCS8)

    def _parseSSLeay(bytes):
        privateKeyP = ASN1Parser(bytes)
        return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
    _parseSSLeay = staticmethod(_parseSSLeay)

    def _parseASN1PrivateKey(privateKeyP):
        # RSAPrivateKey ::= SEQUENCE { version, n, e, d, p, q, dP, dQ, qInv }
        version = privateKeyP.getChild(0).value[0]
        if version != 0:
            raise SyntaxError("Unrecognized RSAPrivateKey version")
        n = bytesToNumber(privateKeyP.getChild(1).value)
        e = bytesToNumber(privateKeyP.getChild(2).value)
        d = bytesToNumber(privateKeyP.getChild(3).value)
        p = bytesToNumber(privateKeyP.getChild(4).value)
        q = bytesToNumber(privateKeyP.getChild(5).value)
        dP = bytesToNumber(privateKeyP.getChild(6).value)
        dQ = bytesToNumber(privateKeyP.getChild(7).value)
        qInv = bytesToNumber(privateKeyP.getChild(8).value)
        return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
    _parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)

    def _parseXML(element):
        # Accept either a <privateKey> or a <publicKey> root element.
        try:
            xmltools.checkName(element, "privateKey")
        except SyntaxError:
            xmltools.checkName(element, "publicKey")

        #Parse attributes
        xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z")
        xmltools.checkNoMoreAttributes(element)

        #Parse public values (<n> and <e>)
        n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx))
        e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx))
        d = 0
        p = 0
        q = 0
        dP = 0
        dQ = 0
        qInv = 0

        #Parse private values, if present
        if element.childNodes.length>=3:
            d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx))
            p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx))
            q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx))
            dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx))
            dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx))
            qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx))
        return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
    _parseXML = staticmethod(_parseXML)
| mit |
julen/translate | translate/storage/rc.py | 25 | 10639 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006,2008-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Classes that hold units of .rc files (:class:`rcunit`) or entire files
(:class:`rcfile`) used in translating Windows Resources.
.. note::
This implementation is based mostly on observing WINE .rc files,
these should mimic other non-WINE .rc files.
"""
import re
from translate.storage import base
def escape_to_python(string):
    """Escape a given .rc string into a valid Python string.

    Removes quoted and backslash-newline line continuations, then decodes
    the ``\\n``/``\\t``/``\\\\`` escape sequences.  The sequences are decoded
    in a single left-to-right pass so that an escaped backslash followed by
    ``n`` (i.e. ``\\\\n`` in the .rc source) decodes to a literal backslash
    plus ``n`` -- the previous sequential substitutions turned it into a
    backslash plus a real newline.
    """
    pystring = re.sub(r'"\s*\\\n\s*"', "", string)  # xxx"\n"xxx line continuation
    pystring = re.sub(r"\\\n", "", pystring)  # backslash newline line continuation
    # One pass over \n, \t and \\ avoids re-interpreting replacement output.
    replacements = {"\\n": "\n", "\\t": "\t", "\\\\": "\\"}
    pystring = re.sub(r"\\[nt\\]", lambda match: replacements[match.group(0)], pystring)
    return pystring
def escape_to_rc(string):
    """Escape a given Python string into a valid .rc string."""
    # Escape backslashes first so the markers added for tabs and newlines
    # below are not themselves escaped a second time.
    escaped = string.replace("\\", "\\\\")
    escaped = escaped.replace("\t", "\\t")
    return escaped.replace("\n", "\\n")
class rcunit(base.TranslationUnit):
    """A unit of an rc file"""

    def __init__(self, source="", encoding="cp1252"):
        """Construct a blank rcunit."""
        super(rcunit, self).__init__(source)
        self.name = ""          # resource identifier, e.g. "STRINGTABLE.IDS_FOO"
        self._value = ""        # the translatable string (exposed via .source)
        self.comments = []
        self.source = source
        self.match = None       # the re match object that produced this unit
        self.encoding = encoding

    def setsource(self, source):
        """Sets the source AND the target to be equal"""
        self._rich_source = None
        self._value = source or ""

    def getsource(self):
        return self._value

    source = property(getsource, setsource)

    def settarget(self, target):
        """.. note:: This also sets the ``.source`` attribute!"""
        self._rich_target = None
        self.source = target

    def gettarget(self):
        return self.source

    target = property(gettarget, settarget)

    def __str__(self):
        """Convert to a string. Double check that unicode is handled somehow here."""
        source = self.getoutput()
        if isinstance(source, unicode):
            return source.encode(getattr(self, "encoding", "UTF-8"))
        return source

    def getoutput(self):
        """Convert the element back into formatted lines for a .rc file."""
        if self.isblank():
            return "".join(self.comments + ["\n"])
        else:
            # Fixed: the stored string is self._value (exposed as .source);
            # there is no .value attribute, so the old "self.value" lookup
            # raised AttributeError.
            return "".join(self.comments + ["%s=%s\n" % (self.name, self._value)])

    def getlocations(self):
        return [self.name]

    def addnote(self, text, origin=None, position="append"):
        self.comments.append(text)

    def getnotes(self, origin=None):
        return '\n'.join(self.comments)

    def removenotes(self):
        self.comments = []

    def isblank(self):
        """Returns whether this is a blank element, containing only comments."""
        # Fixed: same self.value -> self._value correction as getoutput().
        return not (self.name or self._value)
class rcfile(base.TranslationStore):
    """This class represents a .rc file, made up of rcunits."""
    # Unit type produced by the parser.
    UnitClass = rcunit

    def __init__(self, inputfile=None, lang=None, sublang=None, encoding="cp1252"):
        """Construct an rcfile, optionally reading in from inputfile."""
        self.encoding = encoding
        super(rcfile, self).__init__(unitclass=self.UnitClass)
        self.filename = getattr(inputfile, 'name', '')
        # Optional LANGUAGE/SUBLANGUAGE filter: only matching sections and
        # blocks are turned into units.
        self.lang = lang
        self.sublang = sublang
        if inputfile is not None:
            rcsrc = inputfile.read().decode(encoding)
            inputfile.close()
            self.parse(rcsrc)

    def parse(self, rcsrc):
        """Read the source of a .rc file in and include them as units."""
        # Top-level tokenizer: splits the file into LANGUAGE statements,
        # comments, translatable sections (MENU/DIALOG/DIALOGEX/STRINGTABLE/
        # TEXTINCLUDE) and whitespace runs.
        BLOCKS_RE = re.compile("""
                         (?:
                         LANGUAGE\s+[^\n]*|                              # Language details
                         /\*.*?\*/[^\n]*|                                # Comments
                         \/\/[^\n\r]*|                                   # One line comments
                         (?:[0-9A-Z_]+\s+(?:MENU|DIALOG|DIALOGEX|TEXTINCLUDE)|STRINGTABLE)\s  # Translatable section or include text (visual studio)
                         .*?
                         (?:
                         BEGIN(?:\s*?POPUP.*?BEGIN.*?END\s*?)+?END|BEGIN.*?END|  # FIXME Need a much better approach to nesting menus
                         {(?:\s*?POPUP.*?{.*?}\s*?)+?}|{.*?})+[\n]|
                         \s*[\n]                                         # Whitespace
                         )
                         """, re.DOTALL + re.VERBOSE)
        STRINGTABLE_RE = re.compile("""
                         (?P<name>[0-9A-Za-z_]+?),?\s*
                         L?"(?P<value>.*?)"\s*[\n]
                         """, re.DOTALL + re.VERBOSE)
        DIALOG_RE = re.compile("""
                         (?P<type>AUTOCHECKBOX|AUTORADIOBUTTON|CAPTION|Caption|CHECKBOX|CTEXT|CONTROL|DEFPUSHBUTTON|
                         GROUPBOX|LTEXT|PUSHBUTTON|RADIOBUTTON|RTEXT)    # Translatable types
                         \s+
                         L?                                              # Unkown prefix see ./dlls/shlwapi/shlwapi_En.rc
                         "(?P<value>.*?)"                                # String value
                         (?:\s*,\s*|[\n])                                # FIXME ./dlls/mshtml/En.rc ID_DWL_DIALOG.LTEXT.ID_DWL_STATUS
                         (?P<name>.*?|)\s*(?:/[*].*?[*]/|),
                         """, re.DOTALL + re.VERBOSE)
        MENU_RE = re.compile("""
                         (?P<type>POPUP|MENUITEM)
                         \s+
                         "(?P<value>.*?)"                                # String value
                         (?:\s*,?\s*)?
                         (?P<name>[^\s]+).*?[\n]
                         """, re.DOTALL + re.VERBOSE)

        # processsection: current LANGUAGE statement matches the requested
        # lang/sublang; processblock: per-block override when a block carries
        # its own LANGUAGE clause (None means "inherit from the section").
        processsection = False
        self.blocks = BLOCKS_RE.findall(rcsrc)
        for blocknum, block in enumerate(self.blocks):
            processblock = None
            if block.startswith("LANGUAGE"):
                if self.lang is None or self.sublang is None or re.match("LANGUAGE\s+%s,\s*%s\s*$" % (self.lang, self.sublang), block) is not None:
                    processsection = True
                else:
                    processsection = False
            else:
                if re.match(".+LANGUAGE\s+[0-9A-Za-z_]+,\s*[0-9A-Za-z_]+\s*[\n]", block, re.DOTALL) is not None:
                    if re.match(".+LANGUAGE\s+%s,\s*%s\s*[\n]" % (self.lang, self.sublang), block, re.DOTALL) is not None:
                        processblock = True
                    else:
                        processblock = False
            if not (processblock or (processsection and processblock is None)):
                continue
            # STRINGTABLE entries: one unit per string.
            if block.startswith("STRINGTABLE"):
                for match in STRINGTABLE_RE.finditer(block):
                    if not match.groupdict()['value']:
                        continue
                    newunit = rcunit(escape_to_python(match.groupdict()['value']))
                    newunit.name = "STRINGTABLE." + match.groupdict()['name']
                    newunit.match = match
                    self.addunit(newunit)
            if block.startswith("/*"): # Comments
                continue
            if block.startswith("//"): # One line comments
                continue
            if re.match("[0-9A-Z_]+\s+TEXTINCLUDE", block) is not None: # TEXTINCLUDE is editor specific, not part of the app.
                continue
            # DIALOG/DIALOGEX controls: unit names encode type.dialog.control.
            if re.match("[0-9A-Z_]+\s+DIALOG", block) is not None:
                dialog = re.match("(?P<dialogname>[0-9A-Z_]+)\s+(?P<dialogtype>DIALOGEX|DIALOG)", block).groupdict()
                dialogname = dialog["dialogname"]
                dialogtype = dialog["dialogtype"]
                for match in DIALOG_RE.finditer(block):
                    if not match.groupdict()['value']:
                        continue
                    type = match.groupdict()['type']
                    value = match.groupdict()['value']
                    name = match.groupdict()['name']
                    newunit = rcunit(escape_to_python(value))
                    if type == "CAPTION" or type == "Caption":
                        newunit.name = "%s.%s.%s" % (dialogtype, dialogname, type)
                    elif name == "-1":
                        # Anonymous control: fall back to the value itself as id.
                        newunit.name = "%s.%s.%s.%s" % (dialogtype, dialogname, type, value.replace(" ", "_"))
                    else:
                        newunit.name = "%s.%s.%s.%s" % (dialogtype, dialogname, type, name)
                    newunit.match = match
                    self.addunit(newunit)
            # MENU/POPUP entries: unit names encode MENU.menu.type.item.
            if re.match("[0-9A-Z_]+\s+MENU", block) is not None:
                menuname = re.match("(?P<menuname>[0-9A-Z_]+)\s+MENU", block).groupdict()["menuname"]
                for match in MENU_RE.finditer(block):
                    if not match.groupdict()['value']:
                        continue
                    type = match.groupdict()['type']
                    value = match.groupdict()['value']
                    name = match.groupdict()['name']
                    newunit = rcunit(escape_to_python(value))
                    if type == "POPUP":
                        newunit.name = "MENU.%s.%s" % (menuname, type)
                    elif name == "-1":
                        newunit.name = "MENU.%s.%s.%s" % (menuname, type, value.replace(" ", "_"))
                    else:
                        newunit.name = "MENU.%s.%s.%s" % (menuname, type, name)
                    newunit.match = match
                    self.addunit(newunit)

    def __str__(self):
        """Convert the units back to lines."""
        # Re-emits the raw parsed blocks; unit edits are not merged back here.
        return "".join(self.blocks)
| gpl-2.0 |
scenarios/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/sugar_test.py | 157 | 4205 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Base(test_util.Base):
    """Shared fixture: provides a tiny labeled tensor for subclasses."""

    def setUp(self):
        super(Base, self).setUp()
        # Minimal one-element labeled tensor along axis 'x'.
        self.small_lt = core.LabeledTensor(constant_op.constant([1]), [('x', 1)])
class ReshapeCoderTest(Base):
    """Tests for sugar.ReshapeCoder encode/decode round-trips."""

    def setUp(self):
        super(ReshapeCoderTest, self).setUp()
        self.batch_size = 8
        self.num_rows = 50
        self.num_columns = 100
        self.channels = ['red', 'green', 'blue']
        self.masks = [False, True]
        # Give every element a distinct value so reshape errors are detectable.
        tensor = math_ops.range(0,
                                self.batch_size * self.num_rows * self.num_columns *
                                len(self.channels) * len(self.masks))
        tensor = array_ops.reshape(tensor, [
            self.batch_size, self.num_rows, self.num_columns, len(self.channels),
            len(self.masks)
        ])
        self.batch_axis = ('batch', range(self.batch_size))
        self.row_axis = ('row', range(self.num_rows))
        self.column_axis = ('column', range(self.num_columns))
        self.channel_axis = ('channel', self.channels)
        self.mask_axis = ('mask', self.masks)
        axes = [
            self.batch_axis, self.row_axis, self.column_axis, self.channel_axis,
            self.mask_axis
        ]
        self.masked_image_lt = core.LabeledTensor(tensor, axes)

    def test_name(self):
        # Ops created by the coder should carry recognizable name scopes.
        rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
        encode_lt = rc.encode(self.masked_image_lt)
        decode_lt = rc.decode(encode_lt)
        self.assertIn('lt_reshape_encode', encode_lt.name)
        self.assertIn('lt_reshape_decode', decode_lt.name)

    def test_bijection_flat(self):
        # channel x mask collapses into one unlabeled 'depth' axis and back.
        rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
        encode_lt = rc.encode(self.masked_image_lt)
        golden_axes = core.Axes([
            self.batch_axis, self.row_axis, self.column_axis,
            ('depth', len(self.channels) * len(self.masks))
        ])
        self.assertEqual(encode_lt.axes, golden_axes)
        decode_lt = rc.decode(encode_lt)
        self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)

    def test_bijection_with_labels(self):
        # Same round-trip, but the new axes carry explicit labels.
        depth_axis = core.Axis('depth', range(len(self.channels) * len(self.masks)))
        rc = sugar.ReshapeCoder(['channel', 'mask'],
                                [depth_axis, ('other', ['label'])])
        encode_lt = rc.encode(self.masked_image_lt)
        golden_axes = core.Axes([
            self.batch_axis, self.row_axis, self.column_axis, depth_axis,
            ('other', ['label'])
        ])
        self.assertEqual(encode_lt.axes, golden_axes)
        decode_lt = rc.decode(encode_lt)
        self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)

    def test_invalid_input(self):
        # decode() before encode(), and encode() on already-encoded axes, fail.
        with self.assertRaises(ValueError):
            rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
            rc.decode(self.masked_image_lt)
        with self.assertRaises(ValueError):
            rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
            rc.encode(self.masked_image_lt)
            rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'}))
rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'}))
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  test.main()
| apache-2.0 |
x303597316/hue | desktop/core/src/desktop/lib/exceptions_renderable.py | 28 | 2006 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
These methods should never be placed in 'desktop.lib.exceptions'.
This file exists to remove circular reference caused by importing django_util.
"""
import sys
import traceback
from django.utils.encoding import force_unicode
# Need full import statement
import desktop.lib.django_util
class PopupException(Exception):
  """
  Exception that the middleware knows how to render: the template
  shows it to the user as a pop-up.
  """
  def __init__(self, message, title="Error", detail=None, error_code=500):
    Exception.__init__(self, message)
    self.message = message
    self.title = title
    self.detail = detail
    self.error_code = error_code
    # If we are currently handling another exception (i.e. this one is
    # being raised from inside an except block), capture its traceback so
    # the pop-up can show where the original error came from. Outside an
    # exception handler this yields an empty traceback.
    _, _, tb = sys.exc_info()
    self.traceback = traceback.extract_tb(tb)
  def response(self, request):
    # Render the pop-up template with the exception details.
    data = {
      'title': force_unicode(self.title),
      'message': force_unicode(self.message),
      'detail': force_unicode(self.detail),
      'traceback': self.traceback,
    }
    if not request.ajax:
      data['request'] = request
    rendered = desktop.lib.django_util.render("popup_error.mako", request, data)
    rendered.status_code = self.error_code
    return rendered
| apache-2.0 |
ucarion/git-code-debt | tests/server/presentation/commit_delta_test.py | 1 | 1157 | from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import pytest
from git_code_debt.server import metric_config
from git_code_debt.server.presentation.commit_delta import CommitDeltaPresenter
@pytest.yield_fixture
def patched_color_overrides():
    """Make 'ColorOverrideMetric' the only color-overridden metric name.

    NOTE(review): pytest.yield_fixture is deprecated in modern pytest;
    plain @pytest.fixture has supported yield-style fixtures since 3.0.
    """
    with mock.patch.object(
        metric_config, 'color_overrides', ['ColorOverrideMetric'],
    ):
        yield
@pytest.mark.usefixtures('patched_color_overrides')
def test_commit_delta_presenter_not_overriden():
    """A metric without a color override gets an empty css class."""
    assert 'MyMetric' not in metric_config.color_overrides
    presenter = CommitDeltaPresenter.from_data(
        'MyMetric', mock.sentinel.delta,
    )
    assert presenter == CommitDeltaPresenter(
        'MyMetric', '', mock.sentinel.delta,
    )
@pytest.mark.usefixtures('patched_color_overrides')
def test_commit_delta_presenter_with_overrides():
    """An overridden metric gets the 'color-override' css class."""
    assert 'ColorOverrideMetric' in metric_config.color_overrides
    presenter = CommitDeltaPresenter.from_data(
        'ColorOverrideMetric', mock.sentinel.delta,
    )
    assert presenter == CommitDeltaPresenter(
        'ColorOverrideMetric', 'color-override', mock.sentinel.delta,
    )
| mit |
Jobava/zamboni | mkt/account/tests/test_utils_.py | 18 | 2714 | from django.test.client import RequestFactory
from nose.tools import eq_
import mkt
import mkt.site.tests
from mkt.account.utils import purchase_list
from mkt.constants import apps
from mkt.site.fixtures import fixture
from mkt.site.utils import app_factory
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
class TestUtils(mkt.site.tests.TestCase):
    """Tests for mkt.account.utils.purchase_list.

    Verifies which install types and contribution types make an app appear
    in a user's purchase list, and the ordering of the results.
    """
    fixtures = fixture('user_2519', 'webapp_337141')
    def setUp(self):
        self.user = UserProfile.objects.get(pk=2519)
        self.app = Webapp.objects.get(pk=337141)
        self.req = RequestFactory().get('/')
    def test_user(self):
        # A plain user install shows up in the purchase list.
        self.user.installed_set.create(
            addon=self.app,
            install_type=apps.INSTALL_TYPE_USER)
        eq_(list(purchase_list(self.req, self.user).object_list), [self.app])
    def test_developer(self):
        # Developer installs are included too.
        self.user.installed_set.create(
            addon=self.app,
            install_type=apps.INSTALL_TYPE_DEVELOPER)
        eq_(list(purchase_list(self.req, self.user).object_list), [self.app])
    def test_reviewer(self):
        # Reviewer installs are excluded from the purchase list.
        self.user.installed_set.create(
            addon=self.app,
            install_type=apps.INSTALL_TYPE_REVIEWER)
        eq_(list(purchase_list(self.req, self.user).object_list), [])
    def test_ordering(self):
        # Most recently installed app is listed first.
        self.user.installed_set.create(
            addon=self.app,
            install_type=apps.INSTALL_TYPE_USER)
        app2 = app_factory()
        self.user.installed_set.create(
            addon=app2,
            install_type=apps.INSTALL_TYPE_USER)
        eq_(list(purchase_list(self.req, self.user).object_list),
            [app2, self.app])
    def test_contribution_purchase(self):
        # A purchase contribution alone is enough to list the app.
        self.user.contribution_set.create(
            addon=self.app,
            type=mkt.CONTRIB_PURCHASE)
        eq_(list(purchase_list(self.req, self.user).object_list), [self.app])
    def test_contribution_refund(self):
        # Refunded apps still appear in the purchase list.
        self.user.contribution_set.create(
            addon=self.app,
            type=mkt.CONTRIB_REFUND)
        eq_(list(purchase_list(self.req, self.user).object_list), [self.app])
    def test_contribution_chargeback(self):
        # Charged-back apps still appear in the purchase list.
        self.user.contribution_set.create(
            addon=self.app,
            type=mkt.CONTRIB_CHARGEBACK)
        eq_(list(purchase_list(self.req, self.user).object_list), [self.app])
    def test_contribution_installed_same_app(self):
        # An app that is both installed and purchased is listed only once.
        self.user.installed_set.create(
            addon=self.app,
            install_type=apps.INSTALL_TYPE_USER)
        self.user.contribution_set.create(
            addon=self.app,
            type=mkt.CONTRIB_PURCHASE)
        eq_(list(purchase_list(self.req, self.user).object_list), [self.app])
| bsd-3-clause |
Sir-Henry-Curtis/Ironworks | lib/sqlalchemy/util/queue.py | 22 | 6471 | # util/queue.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object.
This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition."""
from collections import deque
from time import time as _time
from sqlalchemy.util import threading
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."
    pass


class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."
    pass


class Queue:
    def __init__(self, maxsize=0):
        """Initialize a queue object with a given maximum size.

        If `maxsize` is <= 0, the queue size is infinite.
        """
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the two conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.RLock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        # Fix: release the mutex in a finally block.  The previous
        # acquire()/.../release() sequence would leave the (reentrant,
        # pool-wide) mutex held forever if an exception such as
        # KeyboardInterrupt arrived between the two calls.
        self.mutex.acquire()
        try:
            return self._qsize()
        finally:
            self.mutex.release()

    def empty(self):
        """Return True if the queue is empty, False otherwise (not
        reliable!)."""
        self.mutex.acquire()
        try:
            return self._empty()
        finally:
            self.mutex.release()

    def full(self):
        """Return True if the queue is full, False otherwise (not
        reliable!)."""
        self.mutex.acquire()
        try:
            return self._full()
        finally:
            self.mutex.release()

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args `block` is True and `timeout` is None (the
        default), block if necessary until a free slot is
        available. If `timeout` is a positive number, it blocks at
        most `timeout` seconds and raises the ``Full`` exception if no
        free slot was available within that time.  Otherwise (`block`
        is false), put an item on the queue if a free slot is
        immediately available, else raise the ``Full`` exception
        (`timeout` is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if not block:
                if self._full():
                    raise Full
            elif timeout is None:
                while self._full():
                    self.not_full.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._full():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Full
                    self.not_full.wait(remaining)
            self._put(item)
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the ``Full`` exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args `block` is True and `timeout` is None (the
        default), block if necessary until an item is available.  If
        `timeout` is a positive number, it blocks at most `timeout`
        seconds and raises the ``Empty`` exception if no item was
        available within that time.  Otherwise (`block` is false),
        return an item if one is immediately available, else raise the
        ``Empty`` exception (`timeout` is ignored in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if self._empty():
                    raise Empty
            elif timeout is None:
                while self._empty():
                    self.not_empty.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._empty():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available.  Otherwise
        raise the ``Empty`` exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.maxsize = maxsize
        self.queue = deque()

    def _qsize(self):
        return len(self.queue)

    # Check whether the queue is empty
    def _empty(self):
        return not self.queue

    # Check whether the queue is full
    def _full(self):
        return self.maxsize > 0 and len(self.queue) == self.maxsize

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
| mit |
pwz3n0/pinball | pinball_ext/executor/cluster_executor.py | 6 | 19163 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for executors running Hadoop/Hive jobs."""
import datetime
import getpass
import os
import re
import subprocess
from pinball_ext.common import hadoop_utils
from pinball_ext.common.decorators import retry
from pinball_ext.common.utils import get_logger
__author__ = 'Zach Drach, Changshu Liu'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class ClusterExecutor(object):
    """ A class that implements Hadoop functionality across multiple
    Hadoop cluster types.
    Subclasses should implement the following functions on their platforms:
    - run_hive_query()
    - run_hadoop_job()
    - run_hadoop_streaming_job()
    - kill_job()
    - get_job_result()
    - get_job_resource_dir() [optional]
    The other public functions on the executor class make use of the
    above functions, making it easy to implement a set of common
    Hadoop/Hive utility functions on multiple platforms.
    All Executor settings are stored in a Config object, described below.
    """
    class Config(object):
        """ An object for defining configuration parameters for this executor.
        All configuration parameters can be overridden in the constructor of the
        executor. These values are just the defaults.
        """
        # The user that is running the commands.
        USER = getpass.getuser()
        # TODO(csliu): job name should be a job level property.
        # A label for all hadoop jobs started by the executor.
        NAME = "AdHocCommand"
        # All jars under these dirs will be added as libjars. It could be local
        # path or s3/hdfs path, depending on how concrete executor interpret it.
        USER_LIBJAR_DIRS = []
        # This jar contains the main class of Hadoop app.
        USER_APPJAR_PATH = None
        # Archive path for Hadoop application.
        USER_ARCHIVE_PATH = None
        # Scheduling queue; production runs go to the prod pool.
        SCHEDULER_QUEUE = 'prod_pool' if USER == 'prod' else None
        SCHEDULER_PARAM = None  # platform-specific
        # Name of the cluster platform.
        # pinball_ext.common.PLATFORM contains all platforms.
        PLATFORM = None
        # Configs about a node (usually the master node) inside a Hadoop
        # cluster; defaults come from hadoop_utils.HadoopHostConfig.
        HADOOP_HOST_USER = hadoop_utils.HadoopHostConfig.USER_NAME
        HADOOP_HOST_NAME = hadoop_utils.HadoopHostConfig.HOST_NAME
        HADOOP_HOST_SSH_PORT = hadoop_utils.HadoopHostConfig.SSH_PORT
        HADOOP_HOST_SSH_KEY_FILE = hadoop_utils.HadoopHostConfig.SSH_KEY_FILE
        HADOOP_HOST_HOME = hadoop_utils.HadoopHostConfig.REMOTE_HADOOP_HOME
    def __init__(self, executor_config=None):
        """ Initialize parameters for the data job execution layer.
        Args:
            executor_config: a dictionary of configuration params that override
                the defaults. The key names should be the same as the field
                names of the corresponding xxxExecutor.Config class.
        """
        executor_config = executor_config if executor_config else {}
        self.log = get_logger(self.__class__.__name__)
        # Override self.config using executor_config dict.
        self.config = self.Config()
        for key, value in executor_config.items():
            if key == 'USER_LIBJAR_DIRS':
                # This key arrives as a comma-separated string; Config
                # stores it as a list of paths.
                self.config.USER_LIBJAR_DIRS = value.split(',')
            else:
                setattr(self.config, key, value)
        # Construct HadoopHostConfig object according to overridden config.
        self.hadoop_host_config = self._contruct_hadoop_host_config()
        # Hadoop job ids collected from runs performed by this executor.
        self.job_ids = []
    def _contruct_hadoop_host_config(self):
        # Build a HadoopHostConfig from the (possibly overridden) Config.
        # NOTE(review): method name has a typo ("contruct"); kept unchanged
        # for backward compatibility with existing callers.
        hh_config = hadoop_utils.HadoopHostConfig()
        hh_config.USER_NAME = self.config.HADOOP_HOST_USER
        hh_config.HOST_NAME = self.config.HADOOP_HOST_NAME
        hh_config.SSH_PORT = self.config.HADOOP_HOST_SSH_PORT
        hh_config.SSH_KEY_FILE = self.config.HADOOP_HOST_SSH_KEY_FILE
        hh_config.REMOTE_HADOOP_HOME = self.config.HADOOP_HOST_HOME
        return hh_config
    @property
    def job_name(self):
        # Label attached to every job, e.g. "prod:AdHocCommand".
        return "%s:%s" % (self.config.USER, self.config.NAME)
    def run_hive_query(self, query_str, upload_archive=True):
        """Run a hive query and return the raw results.
        Args:
            query_str: A hive query string.
            upload_archive: If true, we will upload the archive with git code
                before running this command. This is unnecessary for hive
                queries with no dependencies on our code base.
        Return:
            The tuple (output, stderr, job_ids)
            output: a list of rows of the query output. Each row is a
                list of strings.
            stderr: the job stderr in the same format
            job_ids: a list of hadoop job ids as python strings
        """
        # Abstract: platform-specific subclasses must override this.
        raise NotImplementedError(
            "run_hive_query is not supported by this executor.")
    def run_hadoop_streaming_job(self,
                                 mapper,
                                 reducer,
                                 input_path,
                                 output_dir,
                                 partitioner=None,
                                 input_format='TextInputFormat',
                                 output_format='TextInputFormat',
                                 extra_args=None,
                                 extra_jars=None):
        """Run a Hadoop Streaming job on the cluster.
        See http://hadoop.apache.org/common/docs/r0.20.1/streaming.html.
        Args:
            mapper: The mapper command to execute.
            reducer: The reducer command to execute.
            input_path: The input file or directory on HDFS or S3.
            output_dir: Where to write the output on HDFS or S3.
            partitioner: Optional partitioner class name.
            input_format: The inputformat to use.
            output_format: The outputformat to use.
                NOTE(review): the default is 'TextInputFormat', which looks
                like it should be 'TextOutputFormat' -- confirm against the
                concrete subclasses before changing.
            extra_arguments: List of additional optional arguments to pass into
                the job. (see http://hadoop.apache.org/docs/mapreduce/r0.22.0/
                streaming.html#Specifying+Communication+Formats+in+Detail)
            extra_jars: List of additional jars to pass to hadoop streaming's
                libjars param.
        Returns:
            Same as run_hive_query
        Raises:
            CalledProcessError if job fails.
        """
        # Abstract: platform-specific subclasses must override this.
        raise NotImplementedError(
            "run_hadoop_streaming_job is not supported by this executor.")
    def run_hadoop_job(self,
                       class_name,
                       jobconf_args=None,
                       extra_args=None,
                       extra_jars=None):
        """Runs a hadoop mapreduce job on the cluster.
        Returns stdout and stderr in list-of-lists format
        Args:
            class_name: Java class name for this Hadoop app. This must be in a
                jar defined in self.config.USER_APP_JAR.
            jobconf_args: a dictionary of -D<key>=<value> settings
            extra_args: list of additional args.
            extra_jars: List of extra jars to pass to Hadoop's libjars param.
        Returns:
            Same as run_hive_query
        Raises:
            CalledProcessError if the command fails.
        """
        # Abstract: platform-specific subclasses must override this.
        raise NotImplementedError(
            "run_hadoop_job is not supported by this executor.")
    def kill_job(self, job_id):
        """Kills the Hadoop job with the given id."""
        raise NotImplementedError("kill_job is not supported by this executor.")
    def get_job_result(self, job_id):
        """Retrieves results for previously executed job."""
        raise NotImplementedError("get_job_result is not supported by this executor.")
    def get_job_resource_dir(self, run_as_user):
        """Path to location where job resources (jars, archives) are stored."""
        raise NotImplementedError("get_job_resource_dir is not supported by this executor.")
    def __str__(self):
        # Executors are identified by their concrete class name.
        return self.__class__.__name__
    def __repr__(self):
        return str(self)
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def get_table_description(self, table, database='default'):
        """Return table description string from Hive.

        Returns the text after 'Detailed Table Information' in the output
        of DESCRIBE EXTENDED.
        """
        rows, stderr, job_ids = self.run_hive_query(
            "USE %s; DESCRIBE EXTENDED %s;" % (database, table),
            upload_archive=False)
        # Flatten the row/column structure back into a single string before
        # scanning for the detailed-information section.
        output = '\n'.join([' '.join(row) for row in rows])
        result = re.match(r'.*Detailed Table Information (.*)$',
                          output,
                          re.DOTALL)
        return result.groups()[0]
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def get_table_location(self, table, database='default'):
        """Return the location (storage path) of a table."""
        table_info = self.get_table_description(table, database=database)
        # The description embeds a 'location:<path>,' field; extract <path>.
        return re.match(r'.*location:([^,]*).*', table_info).groups()[0]
@retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
def get_table_locations(self, table, database='default', partition='dt'):
"""Return the locations for each partition of a table."""
location = self.get_table_location(table, database=database)
partitions = self.get_partitions(table, database=database)
return [os.path.join(location, partition, '=', part) for part in partitions]
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def drop_partition(self, table, database, partition_name, partition):
        """Drop a partition in given table in Hive."""
        self.run_hive_query(
            "USE %s; ALTER TABLE %s DROP PARTITION(%s='%s');" %
            (database, table, partition_name, partition),
            upload_archive=False)
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def drop_partitions_condition(self, table, database, condition):
        """Drop partitions satisfying the given condition.

        `condition` is inserted verbatim into the PARTITION(...) clause,
        e.g. "dt < '2014-01-01'".
        """
        self.run_hive_query(
            "USE %s; ALTER TABLE %s DROP PARTITION(%s);" %
            (database, table, condition),
            upload_archive=False)
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def recover_partitions(self, table, database='default'):
        """Recover partitions in given table in Hive."""
        self.run_hive_query(
            "USE %s; ALTER TABLE %s RECOVER PARTITIONS;" %
            (database, table),
            upload_archive=False)
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def add_partition(self, table, database, partition_names, partition_values):
        """Add a partition in given table in Hive.
        Args:
            partition_names: an individual partition name or a list of partition
                names if the table has multiple partitions.
            partitions_values: the actual partition(s) corresponding to
                <partition_names>.
        Examples:
            add_partition(table, database, 'epoch', '2013-07-12-00-00')
            add_partition(table,
                          database,
                          ['epoch', 'action'], ['2013-07-12-00-00', 'a'])
        """
        # Normalize scalar arguments so both call styles are accepted.
        if not isinstance(partition_names, (list, tuple)):
            partition_names = [partition_names]
        if not isinstance(partition_values, (list, tuple)):
            partition_values = [partition_values]
        if len(partition_names) != len(partition_values):
            raise ValueError('Unmatched partition param: %s vs %s' %
                             (partition_names, partition_values))
        # Build "name1='value1',name2='value2',..." for the PARTITION clause.
        partition_str = \
            ','.join(["%s='%s'" % t for t in zip(partition_names, partition_values)])
        self.run_hive_query(
            "USE %s; ALTER TABLE %s ADD IF NOT EXISTS PARTITION(%s);" %
            (database, table, partition_str),
            upload_archive=False)
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def get_partitions(self, table, database='default'):
        """Return a list of epoch strings that Hive knows about for this table.
        This will return only the top partition for a multi-partitioned table.
        """
        rows, stderr, job_ids = self.run_hive_query(
            "USE %s; SHOW PARTITIONS %s;" % (database, table),
            upload_archive=False)
        # Each row looks like "name=value[/name2=value2...]"; keep only the
        # value of the first (top-level) partition, deduplicated and sorted.
        return sorted(set([r[0].split('/')[0].split('=')[1] for r in rows]))
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def get_full_partitions(self, table, database='default'):
        """Return a list of partitions for a table.
        Returns:
            a list of (<partition_names>, <partition_values>) tuples, the same
            format as accepted by the add_partition() function.
        """
        rows, stderr, job_ids = self.run_hive_query(
            "USE %s; SHOW PARTITIONS %s;" % (database, table),
            upload_archive=False)
        # Split "a=1/b=2" into (('a', 'b'), ('1', '2')).
        return [tuple(zip(*[pair.split('=') for pair in row[0].split('/')]))
                for row in rows]
    def get_available_dates(self, table, database='default'):
        """Return a list of datetime.dates that Hive knows for this table."""
        partitions = self.get_partitions(table, database=database)
        # Each row is of the form utc_date=<date>, so parse out the date part.
        # '_distcp' entries are transient copy artifacts, not real partitions.
        dates = [datetime.datetime.strptime(r, "%Y-%m-%d").date()
                 for r in partitions if not r.startswith('_distcp')]
        return dates
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def does_table_exist(self, table, database='default'):
        """Return True if the table exists in the database, else False."""
        rows, stderr, job_ids = self.run_hive_query(
            "USE %s; DESCRIBE EXTENDED %s;" % (database, table),
            upload_archive=False)
        # Hive reports a missing table inside the first output row rather
        # than failing the query, so scan that row's cells for the message.
        for row in rows[0]:
            if row.startswith('Table %s does not exist' % table):
                return False
        return True
    def get_table_latest_date_partition(self, table, database='default'):
        """Get the latest date partition for the given table.

        NOTE(review): returns False when the table does not exist but None
        when it exists with no partitions -- callers should treat both as
        falsy; confirm before unifying the two return values.
        """
        if not self.does_table_exist(table, database=database):
            return False
        existing_parts = self.get_available_dates(table, database=database)
        if not len(existing_parts):
            return None
        # get_available_dates preserves get_partitions' sorted order, so the
        # last element is the most recent date.
        return existing_parts[-1]
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def get_partition_description(self, table, partition, database='default'):
        """Return partition description string from Hive."""
        rows, stderr, job_ids = self.run_hive_query(
            "USE %s; DESCRIBE EXTENDED %s PARTITION( %s );" %
            (database, table, partition),
            upload_archive=False)
        return rows[-1][1]
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def get_partition_location(self, table, partition, database='default'):
        """Return the storage location of a single table partition."""
        part_info = self.get_partition_description(table,
                                                   partition,
                                                   database=database)
        # The description embeds a 'location:<path>,' field; extract <path>.
        return re.match(r'.*location:([^,]*).*', part_info).groups()[0]
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def alter_table_set_location(self, table, location, database='default'):
        """Set the table location in Hive.
        Args:
            table: table to set.
            location: path on S3 or HDFS.
        """
        self.run_hive_query("USE %s; ALTER TABLE %s SET LOCATION '%s';" %
                            (database, table, location),
                            upload_archive=False)
    @retry(subprocess.CalledProcessError, tries=3, delay=1, backoff=2)
    def alter_table_rename(self, table, new_name, database):
        """Rename the table in Hive.
        Args:
            table: table to rename.
            new_name: new name for the table.
        """
        self.run_hive_query("USE %s; ALTER TABLE %s RENAME TO %s;" %
                            (database, table, new_name),
                            upload_archive=False)
    ###########################################
    # Private helper functions
    ###########################################
    def _generate_hive_query_header(self, upload_archive=False):
        """Generates a string to set Hive environment variables.
        It requires that self._upload_archive() has been implemented by the
        subclass.
        Args:
            upload_archive: a boolean indicating whether the archive should be
                uploaded and added to the query header.
        """
        full_query_string = ''
        if upload_archive and self.config.USER_ARCHIVE_PATH:
            # Ship the code archive with the query so UDFs/scripts resolve.
            uploaded_archive_path = self._upload_archive()
            full_query_string += "add archive %s;\n" % uploaded_archive_path
        full_query_string += 'set mapred.job.name=%s;\n' % self.job_name
        return full_query_string
    def _upload_archive(self):
        """Uploads self.config.USER_ARCHIVE_PATH to the cluster and returns
        the path to the uploaded archive.
        The returned path may be an S3 path or a path on the Hadoop master
        node depending on the platform.
        file to upload: self.config.USER_ARCHIVE_PATH
        Returns:
            The path where self.config.USER_ARCHIVE_PATH is uploaded to.
        """
        raise NotImplementedError("Subclasses should implement this method")
    def _get_scheduler_job_setting(self):
        # Scheduler queue flag in "-D<param>=<queue>" form for hadoop jobs;
        # empty when no queue is configured.
        if not self.config.SCHEDULER_QUEUE:
            return ''
        else:
            return ' -D%s=%s ' % (self.config.SCHEDULER_PARAM,
                                  self.config.SCHEDULER_QUEUE)
    def _get_scheduler_hive_setting(self):
        # Scheduler queue setting in "SET <param>=<queue>;" form for Hive.
        if not self.config.SCHEDULER_QUEUE:
            return ''
        else:
            # Can't have a trailing space after the new line since the following
            # comment lines can't have a leading space.
            return ' SET %s=%s;\n' % (self.config.SCHEDULER_PARAM,
                                      self.config.SCHEDULER_QUEUE)
    def _get_scheduler_mrjob_setting(self):
        # Scheduler queue setting in --jobconf form for mrjob invocations.
        if not self.config.SCHEDULER_QUEUE:
            return ''
        else:
            return ' --jobconf="%s=%s" ' % (self.config.SCHEDULER_PARAM,
                                            self.config.SCHEDULER_QUEUE)
| apache-2.0 |
xxxIsaacPeralxxx/anim-studio-tools | grenade/sources/grenade/translators/scene.py | 5 | 1937 | #
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios), its
# affiliates and/or its licensors.
#
from .entity import EntityTranslator
from ..converters.default import convert_links, convert_project
class SceneTranslator(EntityTranslator):
    """
    Translator for Scene entity properties.

    Attach this translator to a Scene model to have inbound property
    values converted before they are applied.

    .. versionadded:: v00_04_00
    """
    # Scene properties whose inbound values are lists of entity links.
    _LINK_PROPERTIES = ('assets', 'notes', 'open_notes', 'shots', 'tasks')

    def __init__(self, session=None):
        """
        Set up the new translator instance by registering its converters.

        :param session:
            An active Shotgun session.

        .. versionadded:: v00_04_00

        .. todo::
            Replace convert_links usage with more entity specific converters (?)
        """
        EntityTranslator.__init__(self, session)
        self.register('project', convert_project)
        for property_name in self._LINK_PROPERTIES:
            self.register(property_name, convert_links)
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 |
blackbliss/medity-expo-2014 | remote-api/flask/lib/python2.7/site-packages/sqlalchemy/ext/mutable.py | 76 | 22912 | # ext/mutable.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
.. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's
legacy approach to in-place mutations of scalar values; see
:ref:`07_migration_mutation_extension`.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The
:mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Below, we illustrate the a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
variants on this approach, such as subclassing ``UserDict.UserDict`` or
``collections.MutableMapping``; the part that's important to this example is
that the :meth:`.Mutable.changed` method is called whenever an in-place
change to the datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutableDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well
have created our ``JSONEncodedDict`` such that it always returns an instance
of ``MutableDict``, and additionally ensured that all calling code
uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not
overridden, any values applied to a parent object which are not instances
of the mutable type will raise a ``ValueError``.
Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. Such as, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> assert m1 in sess.dirty
True
The ``MutableDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using
:meth:`~.Mutable.associate_with`. This is similar to
:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
of ``MutableDict`` in all mappings unconditionally, without
the need to declare it individually::
MutableDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutableDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \\
other.x == self.x and \\
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`.orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
>>> assert v1 in sess.dirty
True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
.. versionadded:: 0.7.10,0.8.0b2
Support for the :meth:`.MutableBase.coerce` method in conjunction with
objects of type :class:`.MutableComposite`.
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
"""
from ..orm.attributes import flag_modified
from .. import event, types
from ..orm import mapper, object_mapper, Mapper
from ..util import memoized_property
import weakref
class MutableBase(object):
    """Common base class to :class:`.Mutable`
    and :class:`.MutableComposite`.
    """

    @memoized_property
    def _parents(self):
        """Dictionary of parent object->attribute name on the parent.
        This attribute is a so-called "memoized" property. It initializes
        itself with a new ``weakref.WeakKeyDictionary`` the first time
        it is accessed, returning the same object upon subsequent access.
        """
        # Weak keys: tracking a value must not keep its parent objects alive.
        return weakref.WeakKeyDictionary()

    @classmethod
    def coerce(cls, key, value):
        """Given a value, coerce it into the target type.
        Can be overridden by custom subclasses to coerce incoming
        data into a particular type.
        By default, raises ``ValueError``.
        This method is called in different scenarios depending on if
        the parent class is of type :class:`.Mutable` or of type
        :class:`.MutableComposite`. In the case of the former, it is called
        for both attribute-set operations as well as during ORM loading
        operations. For the latter, it is only called during attribute-set
        operations; the mechanics of the :func:`.composite` construct
        handle coercion during load operations.

        :param key: string name of the ORM-mapped attribute being set.
        :param value: the incoming value.
        :return: the method should return the coerced value, or raise
          ``ValueError`` if the coercion cannot be completed.
        """
        if value is None:
            # None passes through untracked.
            return None
        msg = "Attribute '%s' does not accept objects of type %s"
        raise ValueError(msg % (key, type(value)))

    @classmethod
    def _listen_on_attribute(cls, attribute, coerce, parent_cls):
        """Establish this type as a mutation listener for the given
        mapped descriptor.

        :param attribute: the instrumented ORM attribute to watch.
        :param coerce: if True, route loaded/set values through cls.coerce().
        :param parent_cls: the mapped class expected to own the attribute.
        """
        key = attribute.key
        if parent_cls is not attribute.class_:
            # Only instrument on the class that directly declares the
            # attribute; subclasses are covered by propagate=True below.
            return

        # rely on "propagate" here
        parent_cls = attribute.class_

        def load(state, *args):
            """Listen for objects loaded or refreshed.
            Wrap the target data member's value with
            ``Mutable``.
            """
            val = state.dict.get(key, None)
            if val is not None:
                if coerce:
                    val = cls.coerce(key, val)
                    state.dict[key] = val
                # Register this parent so future changed() calls reach it.
                val._parents[state.obj()] = key

        def set(target, value, oldvalue, initiator):
            """Listen for set/replace events on the target
            data member.
            Establish a weak reference to the parent object
            on the incoming value, remove it for the one
            outgoing.
            """
            if value is oldvalue:
                # No-op replacement; registrations are already correct.
                return value
            if not isinstance(value, cls):
                value = cls.coerce(key, value)
            if value is not None:
                value._parents[target.obj()] = key
            if isinstance(oldvalue, cls):
                # The outgoing value stops reporting changes to this parent.
                oldvalue._parents.pop(target.obj(), None)
            return value

        def pickle(state, state_dict):
            # Stash tracked values so unpickle() can re-establish _parents;
            # the WeakKeyDictionary itself is not picklable (see module docs).
            val = state.dict.get(key, None)
            if val is not None:
                if 'ext.mutable.values' not in state_dict:
                    state_dict['ext.mutable.values'] = []
                state_dict['ext.mutable.values'].append(val)

        def unpickle(state, state_dict):
            if 'ext.mutable.values' in state_dict:
                for val in state_dict['ext.mutable.values']:
                    val._parents[state.obj()] = key

        event.listen(parent_cls, 'load', load,
                     raw=True, propagate=True)
        event.listen(parent_cls, 'refresh', load,
                     raw=True, propagate=True)
        event.listen(attribute, 'set', set,
                     raw=True, retval=True, propagate=True)
        event.listen(parent_cls, 'pickle', pickle,
                     raw=True, propagate=True)
        event.listen(parent_cls, 'unpickle', unpickle,
                     raw=True, propagate=True)
class Mutable(MutableBase):
    """Mixin that defines transparent propagation of change
    events to a parent object.

    See the example in :ref:`mutable_scalars` for usage information.
    """

    def changed(self):
        """Subclasses should call this method whenever change events occur."""
        for parent_obj, attr_key in self._parents.items():
            flag_modified(parent_obj, attr_key)

    @classmethod
    def associate_with_attribute(cls, attribute):
        """Establish this type as a mutation listener for the given
        mapped descriptor.
        """
        cls._listen_on_attribute(attribute, True, attribute.class_)

    @classmethod
    def associate_with(cls, sqltype):
        """Associate this wrapper with all future mapped columns
        of the given type.

        This is a convenience method that calls
        ``associate_with_attribute`` automatically.

        .. warning::
           The listeners established by this method are *global*
           to all mappers, and are *not* garbage collected. Only use
           :meth:`.associate_with` for types that are permanent to an
           application, not with ad-hoc types else this will cause unbounded
           growth in memory usage.
        """
        def listen_for_type(mapper_, class_):
            # Instrument every column attribute whose type is an instance
            # of the given type class.
            for column_prop in mapper_.column_attrs:
                if isinstance(column_prop.columns[0].type, sqltype):
                    cls.associate_with_attribute(
                        getattr(class_, column_prop.key))

        event.listen(mapper, 'mapper_configured', listen_for_type)

    @classmethod
    def as_mutable(cls, sqltype):
        """Associate a SQL type with this mutable Python type.

        This establishes listeners that detect ORM mappings against the
        given type, adding mutation event trackers to those mappings. The
        type is returned, unconditionally as an instance, so that
        :meth:`.as_mutable` can be used inline within a Column definition.

        Note that only columns declared specifically with that type
        *instance* receive additional instrumentation; to associate a
        mutable type with all occurrences of a particular SQL type class,
        use the :meth:`.Mutable.associate_with` classmethod instead.

        .. warning::
           The listeners established by this method are *global*
           to all mappers, and are *not* garbage collected. Only use
           :meth:`.as_mutable` for types that are permanent to an
           application, not with ad-hoc types else this will cause
           unbounded growth in memory usage.
        """
        sqltype = types.to_instance(sqltype)

        def listen_for_type(mapper_, class_):
            # Only the exact type instance (identity check) is instrumented.
            for column_prop in mapper_.column_attrs:
                if column_prop.columns[0].type is sqltype:
                    cls.associate_with_attribute(
                        getattr(class_, column_prop.key))

        event.listen(mapper, 'mapper_configured', listen_for_type)
        return sqltype
class MutableComposite(MutableBase):
    """Mixin that defines transparent propagation of change
    events on a SQLAlchemy "composite" object to its
    owning parent or parents.

    See the example in :ref:`mutable_composites` for usage information.
    """

    def changed(self):
        """Subclasses should call this method whenever change events occur."""
        for parent_obj, attr_key in self._parents.items():
            # Write each composite element back onto the parent's mapped
            # columns; the ordinary attribute-set events mark it dirty.
            composite_prop = object_mapper(parent_obj).get_property(attr_key)
            for element, column_key in zip(
                    self.__composite_values__(),
                    composite_prop._attribute_keys):
                setattr(parent_obj, column_key, element)
def _setup_composite_listener():
    """Install a global mapper_configured hook that instruments every
    composite whose class subclasses MutableComposite."""
    def _listen_for_type(mapper, class_):
        for prop in mapper.iterate_properties:
            composite_class = getattr(prop, 'composite_class', None)
            if (isinstance(composite_class, type)
                    and issubclass(composite_class, MutableComposite)):
                composite_class._listen_on_attribute(
                    getattr(class_, prop.key), False, class_)

    # Idempotent: re-running module setup must not stack duplicate hooks.
    if not event.contains(Mapper, "mapper_configured", _listen_for_type):
        event.listen(Mapper, 'mapper_configured', _listen_for_type)


_setup_composite_listener()
class MutableDict(Mutable, dict):
    """A dictionary type that implements :class:`.Mutable`.

    Every mutating ``dict`` method is overridden to delegate to the plain
    ``dict`` implementation and then call :meth:`.Mutable.changed`, so that
    in-place changes propagate to all owning parents.  The original
    implementation only tracked ``__setitem__``/``__delitem__``/``clear``;
    mutations made through ``update()``, ``pop()``, ``popitem()`` and
    ``setdefault()`` silently bypassed change detection, leaving the parent
    object clean.

    .. versionadded:: 0.8
    """

    def __setitem__(self, key, value):
        """Detect dictionary set events and emit change events."""
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        """Detect dictionary del events and emit change events."""
        dict.__delitem__(self, key)
        self.changed()

    def clear(self):
        """Detect bulk removal and emit a change event."""
        dict.clear(self)
        self.changed()

    def setdefault(self, key, value=None):
        """Track ``setdefault``, which may insert a new key."""
        result = dict.setdefault(self, key, value)
        self.changed()
        return result

    def update(self, *args, **kwargs):
        """Track bulk updates, which previously bypassed change detection."""
        dict.update(self, *args, **kwargs)
        self.changed()

    def pop(self, *args):
        """Track key removal via ``pop``.

        A missing key with no default raises ``KeyError`` before any
        change event is emitted, matching plain ``dict`` behavior.
        """
        result = dict.pop(self, *args)
        self.changed()
        return result

    def popitem(self):
        """Track key removal via ``popitem``."""
        result = dict.popitem(self)
        self.changed()
        return result

    @classmethod
    def coerce(cls, key, value):
        """Convert plain dictionary to MutableDict."""
        if not isinstance(value, MutableDict):
            if isinstance(value, dict):
                return MutableDict(value)
            # this call will raise ValueError
            return Mutable.coerce(key, value)
        else:
            return value

    def __getstate__(self):
        # Pickle only the plain dict contents; the _parents
        # WeakKeyDictionary is deliberately excluded (see module docs).
        return dict(self)

    def __setstate__(self, state):
        self.update(state)
| mit |
kmonsoor/python-for-android | python3-alpha/extra_modules/gdata/contacts/data.py | 81 | 11775 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Contacts API."""
__author__ = 'vinces1979@gmail.com (Vince Spicer)'
import atom.core
import gdata
import gdata.data
# Link "rel" values identifying a contact's photo and photo-edit links.
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'

# Common values for the externalId / relation "rel" attributes.
EXTERNAL_ID_ORGANIZATION = 'organization'
RELATION_MANAGER = 'manager'

# XML namespace for Contacts-specific elements.  CONTACTS_TEMPLATE expands a
# local tag name into a fully qualified Clark-notation name, e.g. '{ns}tag'.
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
CONTACTS_TEMPLATE = '{%s}%%s' % CONTACTS_NAMESPACE
class BillingInformation(atom.core.XmlElement):
  """
  gContact:billingInformation
  Specifies billing information of the entity represented by the contact. The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'billingInformation'


class Birthday(atom.core.XmlElement):
  """
  Stores birthday date of the person represented by the contact. The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'birthday'
  # Plain-string class attributes below declare XML attribute mappings
  # handled by atom.core.XmlElement.
  when = 'when'


class ContactLink(atom.data.Link):
  """
  Extends atom.data.Link to add gd:etag attribute for photo link.
  """
  etag = gdata.data.GD_TEMPLATE % 'etag'


class CalendarLink(atom.core.XmlElement):
  """
  Storage for URL of the contact's calendar. The element can be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'calendarLink'
  rel = 'rel'
  label = 'label'
  primary = 'primary'
  href = 'href'
class DirectoryServer(atom.core.XmlElement):
  """
  A directory server associated with this contact.
  May not be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'directoryServer'


class Event(atom.core.XmlElement):
  """
  These elements describe events associated with a contact.
  They may be repeated
  """
  _qname = CONTACTS_TEMPLATE % 'event'
  label = 'label'
  rel = 'rel'
  # Child element (gdata.data.When), unlike the plain string attributes above.
  when = gdata.data.When


class ExternalId(atom.core.XmlElement):
  """
  Describes an ID of the contact in an external system of some kind.
  This element may be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'externalId'
  label = 'label'
  rel = 'rel'
  value = 'value'


def ExternalIdFromString(xml_string):
  """Parse an ExternalId element from its XML string representation."""
  return atom.core.parse(ExternalId, xml_string)
class Gender(atom.core.XmlElement):
  """
  Specifies the gender of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'gender'
  value = 'value'


class Hobby(atom.core.XmlElement):
  """
  Describes an ID of the contact in an external system of some kind.
  This element may be repeated.
  """
  # NOTE(review): docstring appears copy-pasted from ExternalId; this element
  # stores a hobby of the contact -- confirm against the Contacts API docs.
  _qname = CONTACTS_TEMPLATE % 'hobby'


class Initials(atom.core.XmlElement):
  """ Specifies the initials of the person represented by the contact. The
  element cannot be repeated. """
  _qname = CONTACTS_TEMPLATE % 'initials'


class Jot(atom.core.XmlElement):
  """
  Storage for arbitrary pieces of information about the contact. Each jot
  has a type specified by the rel attribute and a text value.
  The element can be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'jot'
  rel = 'rel'


class Language(atom.core.XmlElement):
  """
  Specifies the preferred languages of the contact.
  The element can be repeated.
  The language must be specified using one of two mutually exclusive methods:
  using the freeform @label attribute, or using the @code attribute, whose value
  must conform to the IETF BCP 47 specification.
  """
  _qname = CONTACTS_TEMPLATE % 'language'
  code = 'code'
  label = 'label'
class MaidenName(atom.core.XmlElement):
  """
  Specifies maiden name of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'maidenName'


class Mileage(atom.core.XmlElement):
  """
  Specifies the mileage for the entity represented by the contact.
  Can be used for example to document distance needed for reimbursement
  purposes. The value is not interpreted. The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'mileage'


class NickName(atom.core.XmlElement):
  """
  Specifies the nickname of the person represented by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'nickname'


class Occupation(atom.core.XmlElement):
  """
  Specifies the occupation/profession of the person specified by the contact.
  The element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'occupation'


class Priority(atom.core.XmlElement):
  """
  Classifies importance of the contact into 3 categories:
  * Low
  * Normal
  * High
  The priority element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'priority'
class Relation(atom.core.XmlElement):
  """
  This element describe another entity (usually a person) that is in a
  relation of some kind with the contact.
  """
  _qname = CONTACTS_TEMPLATE % 'relation'
  rel = 'rel'
  label = 'label'


class Sensitivity(atom.core.XmlElement):
  """
  Classifies sensitivity of the contact into the following categories:
  * Confidential
  * Normal
  * Personal
  * Private
  The sensitivity element cannot be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'sensitivity'
  rel = 'rel'


class UserDefinedField(atom.core.XmlElement):
  """
  Represents an arbitrary key-value pair attached to the contact.
  """
  _qname = CONTACTS_TEMPLATE % 'userDefinedField'
  key = 'key'
  value = 'value'


def UserDefinedFieldFromString(xml_string):
  """Parse a UserDefinedField element from its XML string representation."""
  return atom.core.parse(UserDefinedField, xml_string)
class Website(atom.core.XmlElement):
  """
  Describes websites associated with the contact, including links.
  May be repeated.
  """
  _qname = CONTACTS_TEMPLATE % 'website'
  href = 'href'
  label = 'label'
  primary = 'primary'
  rel = 'rel'


def WebsiteFromString(xml_string):
  """Parse a Website element from its XML string representation."""
  return atom.core.parse(Website, xml_string)
# --- Structured postal address components -----------------------------------
# Each class below models one sub-element of a contact's structured postal
# address; all are simple text elements identified by their _qname.

class HouseName(atom.core.XmlElement):
  """
  Used in places where houses or buildings have names (and
  not necessarily numbers), eg. "The Pillars".
  """
  _qname = CONTACTS_TEMPLATE % 'housename'


class Street(atom.core.XmlElement):
  """
  Can be street, avenue, road, etc. This element also includes the house
  number and room/apartment/flat/floor number.
  """
  _qname = CONTACTS_TEMPLATE % 'street'


class POBox(atom.core.XmlElement):
  """
  Covers actual P.O. boxes, drawers, locked bags, etc. This is usually but not
  always mutually exclusive with street
  """
  _qname = CONTACTS_TEMPLATE % 'pobox'


class Neighborhood(atom.core.XmlElement):
  """
  This is used to disambiguate a street address when a city contains more than
  one street with the same name, or to specify a small place whose mail is
  routed through a larger postal town. In China it could be a county or a
  minor city.
  """
  _qname = CONTACTS_TEMPLATE % 'neighborhood'


class City(atom.core.XmlElement):
  """
  Can be city, village, town, borough, etc. This is the postal town and not
  necessarily the place of residence or place of business.
  """
  _qname = CONTACTS_TEMPLATE % 'city'


class SubRegion(atom.core.XmlElement):
  """
  Handles administrative districts such as U.S. or U.K. counties that are not
  used for mail addressing purposes. Subregion is not intended for
  delivery addresses.
  """
  _qname = CONTACTS_TEMPLATE % 'subregion'


class Region(atom.core.XmlElement):
  """
  A state, province, county (in Ireland), Land (in Germany),
  departement (in France), etc.
  """
  _qname = CONTACTS_TEMPLATE % 'region'


class PostalCode(atom.core.XmlElement):
  """
  Postal code. Usually country-wide, but sometimes specific to the
  city (e.g. "2" in "Dublin 2, Ireland" addresses).
  """
  _qname = CONTACTS_TEMPLATE % 'postcode'


class Country(atom.core.XmlElement):
  """ The name or code of the country. """
  _qname = CONTACTS_TEMPLATE % 'country'
class Status(atom.core.XmlElement):
  """Person's status element."""
  _qname = CONTACTS_TEMPLATE % 'status'
  indexed = 'indexed'


class PersonEntry(gdata.data.BatchEntry):
  """Represents a google contact"""

  # Attributes assigned a one-element list (e.g. ``[CalendarLink]``) are
  # repeatable child elements; scalar assignments occur at most once
  # (matching the "can/cannot be repeated" notes on the element classes).
  link = [ContactLink]
  billing_information = BillingInformation
  birthday = Birthday
  calendar_link = [CalendarLink]
  directory_server = DirectoryServer
  event = [Event]
  external_id = [ExternalId]
  gender = Gender
  hobby = [Hobby]
  initials = Initials
  jot = [Jot]
  language = [Language]
  maiden_name = MaidenName
  mileage = Mileage
  nickname = NickName
  occupation = Occupation
  priority = Priority
  relation = [Relation]
  sensitivity = Sensitivity
  user_defined_field = [UserDefinedField]
  website = [Website]
  # Shared gd: namespace elements defined in gdata.data.
  name = gdata.data.Name
  phone_number = [gdata.data.PhoneNumber]
  organization = gdata.data.Organization
  postal_address = [gdata.data.PostalAddress]
  email = [gdata.data.Email]
  im = [gdata.data.Im]
  structured_postal_address = [gdata.data.StructuredPostalAddress]
  extended_property = [gdata.data.ExtendedProperty]
  status = Status
class Deleted(atom.core.XmlElement):
  """If present, indicates that this contact has been deleted."""
  # Note: lives in the gd: (GDATA) namespace, not the contacts namespace.
  _qname = gdata.GDATA_TEMPLATE % 'deleted'


class GroupMembershipInfo(atom.core.XmlElement):
  """
  Identifies the group to which the contact belongs or belonged.
  The group is referenced by its id.
  """
  _qname = CONTACTS_TEMPLATE % 'groupMembershipInfo'
  href = 'href'
  deleted = 'deleted'
class ContactEntry(PersonEntry):
  """A Google Contacts flavor of an Atom Entry."""

  deleted = Deleted
  group_membership_info = [GroupMembershipInfo]
  organization = gdata.data.Organization

  def _link_with_rel(self, rel_value):
    """Return the first link whose rel equals rel_value, or None."""
    for candidate in self.link:
      if candidate.rel == rel_value:
        return candidate
    return None

  def GetPhotoLink(self):
    """Return the contact's photo link, or None if absent."""
    return self._link_with_rel(PHOTO_LINK_REL)

  def GetPhotoEditLink(self):
    """Return the contact's photo-edit link, or None if absent."""
    return self._link_with_rel(PHOTO_EDIT_LINK_REL)
class ContactsFeed(gdata.data.BatchFeed):
  """A collection of Contacts."""
  entry = [ContactEntry]


class SystemGroup(atom.core.XmlElement):
  """The contacts systemGroup element.
  When used within a contact group entry, indicates that the group in
  question is one of the predefined system groups."""
  _qname = CONTACTS_TEMPLATE % 'systemGroup'
  id = 'id'


class GroupEntry(gdata.data.BatchEntry):
  """Represents a contact group."""
  extended_property = [gdata.data.ExtendedProperty]
  system_group = SystemGroup


class GroupsFeed(gdata.data.BatchFeed):
  """A Google contact groups feed flavor of an Atom Feed."""
  entry = [GroupEntry]
class ProfileEntry(PersonEntry):
  """A Google Profiles flavor of an Atom Entry."""
  # Inherits all contact sub-elements from PersonEntry; adds nothing new.


def ProfileEntryFromString(xml_string):
  """Converts an XML string into a ProfileEntry object.
  Args:
    xml_string: string The XML describing a Profile entry.
  Returns:
    A ProfileEntry object corresponding to the given XML.
  """
  return atom.core.parse(ProfileEntry, xml_string)


class ProfilesFeed(gdata.data.BatchFeed):
  """A Google Profiles feed flavor of an Atom Feed."""
  _qname = atom.data.ATOM_TEMPLATE % 'feed'
  entry = [ProfileEntry]


def ProfilesFeedFromString(xml_string):
  """Converts an XML string into a ProfilesFeed object.
  Args:
    xml_string: string The XML describing a Profiles feed.
  Returns:
    A ProfilesFeed object corresponding to the given XML.
  """
  return atom.core.parse(ProfilesFeed, xml_string)
| apache-2.0 |
MjAbuz/graphite-web | webapp/graphite/browser/views.py | 12 | 7133 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import re
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.conf import settings
from graphite.account.models import Profile
from graphite.util import getProfile, getProfileByUsername, defaultUser, json
from graphite.logger import log
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import cPickle as pickle
except ImportError:
import pickle
def header(request):
  "View for the header frame of the browser UI"
  return render_to_response("browserHeader.html", {
    'user': request.user,
    'profile': getProfile(request),
    'documentation_url': settings.DOCUMENTATION_URL,
    'login_url': settings.LOGIN_URL,
  })
def browser(request):
  "View for the top-level frame of the browser UI"
  query_string = request.GET.urlencode()
  target = request.GET.get('target')

  # '#' would terminate the querystring in the JS libraries; escape it.
  if query_string:
    query_string = query_string.replace('#', '%23')
  if target:
    target = target.replace('#', '%23')

  return render_to_response("browser.html", {
    'queryString': query_string,
    'target': target,
  })
def search(request):
  """Search the metric index file.

  Returns a comma-separated list of index lines that match *any* of the
  whitespace-separated patterns in the posted 'query' (case-insensitive
  regex search), capped at 100 results.
  """
  # .get() instead of ['query']: a POST without the field now yields an
  # empty result rather than a KeyError/500.
  query = request.POST.get('query')
  if not query:
    return HttpResponse("")

  regexes = [re.compile(pattern, re.I) for pattern in query.split()]

  def matches(line):
    # A line qualifies if any single pattern matches it.
    return any(regex.search(line) for regex in regexes)

  results = []
  # 'with' guarantees the index file is closed even if an error occurs
  # mid-scan; the original leaked the handle on exceptions.
  with open(settings.INDEX_FILE) as index_file:
    for line in index_file:
      if matches(line):
        results.append(line.strip())
        if len(results) >= 100:
          break

  return HttpResponse(','.join(results), mimetype='text/plain')
def myGraphLookup(request):
  """View for My Graphs navigation.

  Builds (via json_response) a list of tree-node dicts for one level of the
  logged-in user's saved-graph hierarchy, rooted at request.GET['path'].
  """
  profile = getProfile(request,allowDefault=False)
  assert profile

  nodes = []
  # Template dicts merged into each result node to mark it as a clickable
  # leaf (a saved graph) or an expandable branch (a folder).
  leafNode = {
    'allowChildren' : 0,
    'expandable' : 0,
    'leaf' : 1,
  }
  branchNode = {
    'allowChildren' : 1,
    'expandable' : 1,
    'leaf' : 0,
  }

  try:
    path = str( request.GET['path'] )

    # Normalize the requested path into a dotted prefix ("a.b." form).
    if path:
      if path.endswith('.'):
        userpath_prefix = path
      else:
        userpath_prefix = path + '.'
    else:
      userpath_prefix = ""

    matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(userpath_prefix) ]
    log.info( "myGraphLookup: username=%s, path=%s, userpath_prefix=%s, %ld graph to process" % (profile.user.username, path, userpath_prefix, len(matches)) )
    # De-duplicate: each child name is emitted at most once per kind.
    branch_inserted = set()
    leaf_inserted = set()

    for graph in matches: #Now let's add the matching graph
      isBranch = False
      # A '.' after the prefix means the graph lives deeper in the tree,
      # so only the segment up to that dot is shown, as a branch.
      dotPos = graph.name.find( '.', len(userpath_prefix) )
      if dotPos >= 0:
        isBranch = True
        name = graph.name[ len(userpath_prefix) : dotPos ]
        if name in branch_inserted: continue
        branch_inserted.add(name)
      else:
        name = graph.name[ len(userpath_prefix): ]
        if name in leaf_inserted: continue
        leaf_inserted.add(name)

      node = {'text' : str(name) }

      if isBranch:
        # Branch ids are full dotted prefixes usable as the next 'path'.
        node.update( { 'id' : str(userpath_prefix + name + '.') } )
        node.update(branchNode)
      else:
        # Leaf ids embed an md5 digest of the name, presumably to keep
        # arbitrary graph names safe as tree-node ids -- TODO confirm.
        # NOTE(review): md5().update(name) expects bytes on Python 3;
        # this code path is Python 2 era (see md5/cPickle import fallbacks).
        m = md5()
        m.update(name)
        node.update( { 'id' : str(userpath_prefix + m.hexdigest()), 'graphUrl' : str(graph.url) } )
        node.update(leafNode)

      nodes.append(node)

  except:
    # Broad catch is deliberate: navigation degrades to "No saved graphs"
    # instead of erroring; details are written to the log.
    log.exception("browser.views.myGraphLookup(): could not complete request.")

  if not nodes:
    no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
    no_graphs.update(leafNode)
    nodes.append(no_graphs)

  return json_response(nodes, request)
def userGraphLookup(request):
    """View for User Graphs navigation.

    With no username in the request, returns one branch node per profile
    that has saved graphs. With a username, returns the branches/leaves of
    that user's saved graphs under the requested dotted path. The response
    is JSON (or JSONP) produced by json_response().
    """
    user = request.GET.get('user')
    path = request.GET['path']

    if user:
        # Explicit user given: the path is "<username>.<graph path>"
        username = user
        graphPath = path[len(username)+1:]
    elif '.' in path:
        username, graphPath = path.split('.', 1)
    else:
        username, graphPath = path, None

    nodes = []

    # Node templates for the client-side tree widget (ExtJS-style flags)
    branchNode = {
        'allowChildren' : 1,
        'expandable' : 1,
        'leaf' : 0,
    }
    leafNode = {
        'allowChildren' : 0,
        'expandable' : 0,
        'leaf' : 1,
    }

    try:
        if not username:
            # Top level: list every user (except the default) with saved graphs
            profiles = Profile.objects.exclude(user=defaultUser)

            for profile in profiles:
                if profile.mygraph_set.count():
                    node = {
                        'text' : str(profile.user.username),
                        'id' : str(profile.user.username)
                    }
                    node.update(branchNode)
                    nodes.append(node)

        else:
            profile = getProfileByUsername(username)
            assert profile, "No profile for username '%s'" % username

            if graphPath:
                prefix = graphPath.rstrip('.') + '.'
            else:
                prefix = ''

            matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(prefix) ]
            # Track names already emitted so each node appears only once
            inserted = set()

            for graph in matches:
                relativePath = graph.name[ len(prefix): ]
                nodeName = relativePath.split('.')[0]

                if nodeName in inserted:
                    continue
                inserted.add(nodeName)

                if '.' in relativePath: # branch
                    node = {
                        'text' : str(nodeName),
                        'id' : str(username + '.' + prefix + nodeName + '.'),
                    }
                    node.update(branchNode)
                else: # leaf
                    # Leaf ids embed an md5 of the name so they are safe tree ids
                    m = md5()
                    m.update(nodeName)

                    node = {
                        'text' : str(nodeName ),
                        'id' : str(username + '.' + prefix + m.hexdigest()),
                        'graphUrl' : str(graph.url),
                    }
                    node.update(leafNode)

                nodes.append(node)

    except:
        # FIX: log message previously named the wrong view ("userLookup")
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
        log.exception("browser.views.userGraphLookup(): could not complete request for %s" % username)

    if not nodes:
        no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
        no_graphs.update(leafNode)
        nodes.append(no_graphs)

    return json_response(nodes, request)
def json_response(nodes, request=None):
    """Serialize *nodes* as a JSON (or JSONP) HttpResponse.

    When *request* carries a 'jsonp' parameter, the JSON payload is wrapped
    in a call to that callback and served as text/javascript; otherwise it
    is served as application/json. Caching is disabled in both cases.
    """
    jsonp = request.REQUEST.get('jsonp', False) if request else False

    payload = json.dumps(nodes)

    if jsonp:
        response = HttpResponse("%s(%s)" % (jsonp, payload),
                                mimetype="text/javascript")
    else:
        response = HttpResponse(payload, mimetype="application/json")

    # Tell clients and proxies never to cache navigation responses
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
def any(iterable): #python2.4 compatibility
    """Return True if at least one element of *iterable* is truthy."""
    found = False
    for element in iterable:
        if element:
            found = True
            break
    return found
| apache-2.0 |
joelddiaz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_openshift/src/class/oc_serviceaccount_secret.py | 66 | 4640 | # pylint: skip-file
# flake8: noqa
class OCServiceAccountSecret(OpenShiftCLI):
    ''' Class to wrap the oc command line tools for managing the secrets
        attached to a service account '''
    kind = 'sa'

    def __init__(self, config, verbose=False):
        ''' Constructor for OpenshiftOC

        :param config: ServiceAccountConfig carrying namespace, kubeconfig,
                       service account name and the secrets to manage
        :param verbose: pass-through verbosity flag for the oc calls
        '''
        super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
        self.config = config
        self.verbose = verbose
        # Fetched lazily from the cluster; see the service_account property
        self._service_account = None

    @property
    def service_account(self):
        ''' Property for the service account (fetched on first access) '''
        if not self._service_account:
            self.get()
        return self._service_account

    @service_account.setter
    def service_account(self, data):
        ''' setter for the service account '''
        self._service_account = data

    def exists(self, in_secret):
        ''' verifies if secret exists in the service account '''
        result = self.service_account.find_secret(in_secret)
        if not result:
            return False
        return True

    def get(self):
        ''' get the service account definition from the master '''
        sao = self._get(OCServiceAccountSecret.kind, self.config.name)
        if sao['returncode'] == 0:
            self.service_account = ServiceAccount(content=sao['results'][0])
            # Callers only care about the attached secrets
            sao['results'] = self.service_account.get('secrets')
        return sao

    def delete(self):
        ''' delete secrets from the service account '''
        modified = []
        for rem_secret in self.config.secrets:
            modified.append(self.service_account.delete_secret(rem_secret))

        if any(modified):
            return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)

        return {'returncode': 0, 'changed': False}

    def put(self):
        ''' place secrets into sa '''
        modified = False
        for add_secret in self.config.secrets:
            if not self.service_account.find_secret(add_secret):
                self.service_account.add_secret(add_secret)
                modified = True

        if modified:
            return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)

        return {'returncode': 0, 'changed': False}

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        ''' run the ansible idempotent code

        :param params: Ansible module parameters
        :param check_mode: when True, report what would change without
                           touching the cluster
        '''
        sconfig = ServiceAccountConfig(params['service_account'],
                                       params['namespace'],
                                       params['kubeconfig'],
                                       [params['secret']],
                                       None)

        oc_sa_sec = OCServiceAccountSecret(sconfig, verbose=params['debug'])

        state = params['state']

        api_rval = oc_sa_sec.get()

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': "list"}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_sa_sec.exists(params['secret']):

                if check_mode:
                    # BUG FIX: the original had mismatched quotes here, so the
                    # backslash continued the *string literal* across the line
                    # and the returned message contained a literal '" + "'.
                    # Now concatenates two strings like the 'present' branch.
                    return {'changed': True, 'msg': 'Would have removed the ' + \
                            'secret from the service account.'}

                api_rval = oc_sa_sec.delete()

                return {'changed': True, 'results': api_rval, 'state': "absent"}

            return {'changed': False, 'state': "absent"}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_sa_sec.exists(params['secret']):

                if check_mode:
                    return {'changed': True, 'msg': 'Would have added the ' + \
                            'secret to the service account.'}

                # Create it here
                api_rval = oc_sa_sec.put()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_sa_sec.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': "present"}

            return {'changed': False, 'results': api_rval, 'state': "present"}

        return {'failed': True,
                'changed': False,
                'msg': 'Unknown state passed. %s' % state,
                'state': 'unknown'}
| apache-2.0 |
cancerregulome/Addama | svc.py | 2 | 8391 | #!/usr/bin/env python
"""
Simple tornado app to list and read files.
Start server:
python svc.py -port=8001
(Default port is 8000)
Using service
/
List:
/data/path/to/dir
Read:
/data/path/to/file
Filter:
/data/path/to/file?rows=id1,id2&cols=colid1,colid2
All services return status_code 500 if there is any error.
"""
import logging
import tornado.ioloop
from tornado.options import define, options
import tornado.web
import json
import os
from pretty_json import PrettyJsonRequestHandler
from oauth.google import GoogleOAuth2SignInHandler, GoogleOAuth2CallbackHandler, GoogleOAuth2RefreshTokenHandler, GoogleSignoutHandler
from oauth.google import GoogleApisOAuthProxyHandler, GoogleOAuthDownloadProxyHandler, GOOGLE_APIS, GOOGLE_SPREADSHEET_APIS
from datastores.mongo import MongoDbQueryHandler
from datastores.localfiles import LocalFileHandler
from storage.mongo import MongoDbStorageHandler
from storage.collections import MongoDbCollectionsHandler, MongoDbListCollectionsHandler
from scc.github import GitWebHookHandler
# ---------------------------------------------------------------------------
# Command-line / config-file options (tornado.options); each is documented
# by its own help= string and logged at startup by main().
# ---------------------------------------------------------------------------
define("cookie_id", default="whoami_addama", help="Cookie ID for application instance; stores user id encrypted")
define("service_root", default="/", help="Helps to control path if its being proxied")
define("oauth_uri_root", default="/", help="Helps to control path for OAUTH callback if its being proxied")
define("data_path", default="../..", help="Path to data files")
define("port", default=8000, help="run on the given port", type=int)
define("client_host", default="http://localhost:8000", help="Client URL for Google OAuth2")
define("client_id", help="Client ID for Google OAuth2")
define("client_secret", help="Client Secrets for Google OAuth2")
define("config_file", help="Path to config file")
define("config_file_json", help="Path to JSON config file")
define("authorized_users", default=[], help="List of authorized user emails")
define("mongo_storage_uri", default="mongodb://localhost:27017", help="MongoDB URI in the form mongodb://username:password@hostname:port")
define("mongo_storage_db", default="storage_db", help="MongoDB database name")
define("mongo_datastores", default=[("ds", "mongodb://localhost:27017")], help="Lookup MongoDB configurations")
define("mongo_rows_limit", default=1000, type=int, help="Lookup MongoDB limit on rows returned from query")
define("case_sensitive_lookups", default=[], help="List of database names to apply case sensitive lookups")
define("github_repo_api_url", help="Link to repository api url (see examples/svc.config)")
define("github_project_root", help="Local path to main repository branch")
define("github_branches_root", help="Local path to top-level branches directory")
define("github_postproc_cmd", help="Command-line to execute after checkout")
define("github_git_cmd", help="Path to git executable", default="git")
define("github_branches_json_path", help="Path to publish branches json", default=".")
define("verbose", default=False, type=bool, help="Enable verbose printouts")

# Tornado Application settings. main() overwrites cookie_secret with
# --client_secret when one is provided.
settings = {
    "debug": True,
    "cookie_secret": "not_a_big_secret"
}

# HTTPServer settings: trust X-* headers from a fronting proxy and bind to
# every interface.
server_settings = {
    "xheaders" : True,
    "address" : "0.0.0.0"
}
class DataStoreConfiguration(object):
    """Configuration of a single MongoDB datastore.

    Holds the connection URI plus the set of database names that must be
    looked up case-sensitively.
    """

    def __init__(self, uri, case_sensitive_databases):
        self.set_uri(uri)
        self.case_sensitive_databases = frozenset(case_sensitive_databases)

    def get_uri(self):
        """Return the MongoDB connection URI."""
        return self._connection_uri

    def set_uri(self, uri):
        """Store the MongoDB connection URI."""
        self._connection_uri = uri

    def is_case_sensitive_database(self, database_name):
        """Return True when *database_name* was configured as case-sensitive."""
        return database_name in self.case_sensitive_databases

    uri = property(get_uri, set_uri)
class MainHandler(PrettyJsonRequestHandler):
    """Root endpoint: lists the top-level service endpoints as JSON."""

    def get(self):
        base = self.request.path
        items = [{"id": endpoint, "uri": base + endpoint}
                 for endpoint in ("data", "datastores", "collections")]
        self.write({"items": items})
        self.set_status(200)
class AuthProvidersHandler(tornado.web.RequestHandler):
    """Lists the authentication providers supported by this deployment."""

    def get(self):
        providers = [{
            "id": "google",
            "label": "Google+",
            "active": False,
            "logo": "https://www.google.com/images/icons/ui/gprofile_button-64.png"
        }]
        self.write({"providers": providers})
        self.set_status(200)
def parse_datastore_configuration():
    """Build a map of datastore id -> DataStoreConfiguration.

    Reads options.mongo_datastores, whose entries are either
    (id, uri) or (id, uri, case_sensitive_databases); any other shape is
    logged as an error and skipped.
    """
    datastore_map = {}
    for entry in options.mongo_datastores:
        if len(entry) == 2:
            ds_id, ds_uri = entry
            datastore_map[ds_id] = DataStoreConfiguration(ds_uri, [])
        elif len(entry) == 3:
            ds_id, ds_uri, sensitive_dbs = entry
            datastore_map[ds_id] = DataStoreConfiguration(ds_uri, sensitive_dbs)
        else:
            logging.error("Invalid datastore config: " + repr(entry))
    return datastore_map
def main():
    """Parse options, log the effective configuration, register all URL
    handlers and start the Tornado IO loop."""
    options.parse_command_line()
    if not options.config_file is None:
        # Load file values, then re-parse the command line so explicit
        # command-line flags win over config-file values.
        options.parse_config_file(options.config_file)
        options.parse_command_line()

    if options.client_secret:
        # Reuse the OAuth client secret to sign secure cookies
        settings["cookie_secret"] = options.client_secret

    logging.info("Starting Tornado web server on http://localhost:%s" % options.port)
    logging.info("--cookie_id=%s" % options.cookie_id)
    logging.info("--service_root=%s" % options.service_root)
    logging.info("--data_path=%s" % options.data_path)
    logging.info("--client_host=%s" % options.client_host)
    logging.info("--authorized_users=%s" % options.authorized_users)
    logging.info("--mongo_storage_uri=%s" % options.mongo_storage_uri)
    logging.info("--mongo_storage_db=%s" % options.mongo_storage_db)
    logging.info("--mongo_rows_limit=%s" % options.mongo_rows_limit)

    if not options.config_file is None:
        logging.info("--config_file=%s" % options.config_file)
    if not options.config_file_json is None:
        logging.info("--config_file_json=%s" % options.config_file_json)

    if not options.github_repo_api_url is None:
        logging.info("--github_repo_api_url=%s" % options.github_repo_api_url)
        logging.info("--github_project_root=%s" % options.github_project_root)
        logging.info("--github_branches_root=%s" % options.github_branches_root)
        logging.info("--github_postproc_cmd=%s" % options.github_postproc_cmd)
        logging.info("--github_git_cmd=%s" % options.github_git_cmd)
        logging.info("--github_branches_json_path=%s" % options.github_branches_json_path)
        logging.info("Starting GitHub Web Hook at http://localhost:%s/gitWebHook" % options.port)

    MongoDbQueryHandler.datastores = parse_datastore_configuration()

    if not options.config_file_json is None:
        MongoDbQueryHandler.datastores_config = json.load(open(options.config_file_json))

    # NOTE(review): str.strip("svc.py") strips the *characters* s,v,c,.,p,y
    # from both ends, not the "svc.py" suffix — this happens to work only
    # while the parent path does not start/end with those characters; verify.
    static_file_path = "/%s/static" % os.path.abspath(__file__).strip("svc.py").strip("/")
    if options.verbose: logging.info("static files served from: %s" % static_file_path)

    application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/static/(.*)", tornado.web.StaticFileHandler, { "path": static_file_path }),
        (r"/auth/signin/google", GoogleOAuth2SignInHandler),
        (r"/auth/signin/google/oauth2_callback", GoogleOAuth2CallbackHandler),
        (r"/auth/signin/google/refresh", GoogleOAuth2RefreshTokenHandler),
        (r"/auth/signout/google", GoogleSignoutHandler),
        (r"/auth/providers", AuthProvidersHandler),
        (r"/auth/providers/google_download", GoogleOAuthDownloadProxyHandler),
        (r"/auth/providers/google_apis/(.*)", GoogleApisOAuthProxyHandler, dict(api_domain=GOOGLE_APIS)),
        (r"/auth/providers/google_spreadsheets/(.*)", GoogleApisOAuthProxyHandler, dict(api_domain=GOOGLE_SPREADSHEET_APIS)),
        (r"/datastores", MongoDbQueryHandler),
        (r"/datastores/(.*)", MongoDbQueryHandler),
        (r"/data?(.*)", LocalFileHandler),
        (r"/storage/(.*)", MongoDbStorageHandler),
        (r"/collections", MongoDbListCollectionsHandler),
        (r"/collections/", MongoDbListCollectionsHandler),
        (r"/collections/(.*)", MongoDbCollectionsHandler),
        (r"/gitWebHook?(.*)", GitWebHookHandler)
    ], **settings)

    application.listen(options.port, **server_settings)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()
| mit |
versae/neo4j-rest-client | neo4jrestclient/iterable.py | 1 | 2472 | # -*- coding: utf-8 -*-
class Iterable(list):
    """
    Class to iterate among returned objects.

    Wraps a raw REST result list and instantiates ``cls`` around each
    element on access. Note that iteration walks the list from the last
    element down to the first and is single-shot: the internal cursor is
    not reset once exhausted.
    """

    def __init__(self, cls, lst=None, attr=None, auth=None, cypher=None):
        if lst is None:
            lst = []
        self._auth = auth or {}
        self._cypher = cypher
        self._list = lst
        # Iteration cursor: starts one past the end and walks down to 0
        self._index = len(lst)
        self._class = cls
        # When set, raw elements are dicts and this key holds the URL/value
        # to pass to the wrapped class
        self._attribute = attr
        super(Iterable, self).__init__(lst)

    def __getslice__(self, *args, **kwargs):
        # Python 2 only: wrap every element of the requested slice
        eltos = super(Iterable, self).__getslice__(*args, **kwargs)
        if self._attribute:
            return [self._class(elto[self._attribute], update_dict=elto,
                                auth=self._auth, cypher=self._cypher)
                    for elto in eltos]
        else:
            return [self._class(elto, auth=self._auth, cypher=self._cypher)
                    for elto in eltos]

    def __getitem__(self, index):
        elto = super(Iterable, self).__getitem__(index)
        if self._attribute:
            return self._class(elto[self._attribute], update_dict=elto,
                               auth=self._auth, cypher=self._cypher)
        else:
            return self._class(elto, auth=self._auth, cypher=self._cypher)

    def __repr__(self):
        return self.__unicode__()

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        return u"<Neo4j %s: %s>" % (self.__class__.__name__,
                                    self._class.__name__)

    def __contains__(self, value):
        # TODO: Find a better way to check if value is instance of Base
        # avoiding a circular loop of imports
        # if isinstance(value, Base) and hasattr(value, "url"):
        if (hasattr(value, "url") and hasattr(value, "id")
                and hasattr(value, "_dic")):
            if self._attribute:
                return value.url in [elto[self._attribute]
                                     for elto in self._list]
            else:
                return value.url in self._list
        return False

    def __iter__(self):
        return self

    @property
    def single(self):
        """Return the first wrapped element, or None when empty.

        BUG FIX: list indexing raises IndexError (not KeyError) on an empty
        sequence, so the original ``except KeyError`` never fired and an
        IndexError escaped to the caller instead of returning None.
        """
        try:
            return self[0]
        except IndexError:
            return None

    def __next__(self):
        if self._index == 0:
            raise StopIteration
        self._index = self._index - 1
        return self.__getitem__(self._index)

    def next(self):
        # Python 2 iterator protocol delegates to __next__
        return self.__next__()
| gpl-3.0 |
comsaint/ccbc-library | ccbclib/tables.py | 2 | 1277 | import django_tables2 as tables
from ccbclib.models import Book, Borrower, Transaction
class BookTable(tables.Table):
    """django-tables2 table listing Book records."""
    idbook = tables.Column(verbose_name="id")
    # Model accessor methods (get_*) rendered as computed, non-sortable columns
    get_area = tables.Column(verbose_name="Field",orderable=False)
    get_language = tables.Column(verbose_name="Language",orderable=False)
    get_book_status = tables.Column(verbose_name="Status",orderable=False)
    get_times_borrowed = tables.Column(verbose_name="Times borrowed",orderable=False)

    class Meta:
        model = Book
        # "paleblue" is the CSS theme shipped with django-tables2
        attrs = {"class": "paleblue"}
        # Internal flag, not shown to users
        exclude = ("statusflag",)
class BorrowerTable(tables.Table):
    """django-tables2 table listing Borrower records."""
    idborrower = tables.Column(verbose_name="id")
    # Computed status column from the model's get_borrower_status accessor
    get_borrower_status = tables.Column(verbose_name="Status",orderable=False)

    class Meta:
        model = Borrower
        attrs = {"class": "paleblue"}
        # Internal flag, not shown to users
        exclude = ("statusflag",)
class TransactionTable(tables.Table):
    """django-tables2 table listing Transaction (borrow/return) records."""
    idtransaction = tables.Column(verbose_name="id")
    # Computed columns from model accessors; not sortable at the DB level
    cal_due_date = tables.DateColumn(verbose_name="Due Date",orderable=False)
    is_returned = tables.BooleanColumn(verbose_name="Is it returned?",orderable=False)
    is_overdue = tables.BooleanColumn(verbose_name="Is it overdue?",orderable=False)

    class Meta:
        model = Transaction
        attrs = {"class": "paleblue"}
ahmadshahwan/ipopo | pelix/misc/mqtt_client.py | 3 | 13064 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
MQTT client utility: Tries to hide Paho client details to ease MQTT usage.
Reconnects to the MQTT server automatically.
This module depends on the paho-mqtt package (ex-mosquitto), provided by the
Eclipse Foundation: see http://www.eclipse.org/paho
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import os
import sys
import threading
# MQTT client
import paho.mqtt.client as paho
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class MqttClient(object):
    """
    MQTT client wrapper: hides Paho client details to ease MQTT usage and
    reconnects to the MQTT server automatically when the connection drops.

    (The original docstring, "Remote Service discovery provider based on
    MQTT", was a copy-paste error: this class is a generic client wrapper,
    as the module docstring states.)
    """
    def __init__(self, client_id=None):
        """
        Sets up members

        :param client_id: ID of the MQTT client
        """
        # No ID
        if not client_id:
            # Randomize client ID
            self._client_id = self.generate_id()
        elif len(client_id) > 23:
            # ID too large (MQTT 3.1 limits client IDs to 23 characters)
            # NOTE(review): the log text says "generating a random one" but
            # the given ID is in fact kept, as the next comment explains.
            _logger.warning("MQTT Client ID '%s' is too long (23 chars max): "
                            "generating a random one", client_id)
            # Keep the client ID as it might be accepted
            self._client_id = client_id
        else:
            # Keep the ID as is
            self._client_id = client_id

        # Reconnection timer
        self.__timer = threading.Timer(5, self.__reconnect)

        # Publication events: message id -> threading.Event, set on publish ack
        self.__in_flight = {}

        # MQTT client
        self.__mqtt = paho.Client(self._client_id)

        # Give access to Paho methods to configure TLS
        self.tls_set = self.__mqtt.tls_set

        # Paho callbacks
        self.__mqtt.on_connect = self.__on_connect
        self.__mqtt.on_disconnect = self.__on_disconnect
        self.__mqtt.on_message = self.__on_message
        self.__mqtt.on_publish = self.__on_publish

    @property
    def raw_client(self):
        """
        Returns the raw client object, depending on the underlying library
        """
        return self.__mqtt

    @staticmethod
    def on_connect(client, result_code):
        """
        User callback: called when the client is connected

        :param client: The Pelix MQTT client which connected
        :param result_code: The MQTT result code
        """
        pass

    @staticmethod
    def on_disconnect(client, result_code):
        """
        User callback: called when the client is disconnected

        :param client: The Pelix MQTT client which disconnected
        :param result_code: The MQTT result code
        """
        pass

    @staticmethod
    def on_message(client, message):
        """
        User callback: called when the client has received a message

        :param client: The Pelix MQTT client which received a message
        :param message: The MQTT message
        """
        pass

    @classmethod
    def generate_id(cls, prefix="pelix-"):
        """
        Generates a random MQTT client ID

        :param prefix: Client ID prefix (truncated to 8 chars)
        :return: A client ID of 22 or 23 characters
        """
        if not prefix:
            # Normalize string
            prefix = ""
        else:
            # Truncate long prefixes
            prefix = prefix[:8]

        # Prepare the missing part: hex-encode random bytes to fill 23 chars
        nb_bytes = (23 - len(prefix)) // 2
        random_bytes = os.urandom(nb_bytes)
        if sys.version_info[0] >= 3:
            # Python 3: iterating bytes already yields ints
            random_ints = [char for char in random_bytes]
        else:
            # Python 2: bytes iterate as 1-char strings
            random_ints = [ord(char) for char in random_bytes]

        random_id = ''.join('{0:02x}'.format(value) for value in random_ints)
        return "{0}{1}".format(prefix, random_id)

    @classmethod
    def topic_matches(cls, subscription_filter, topic):
        """
        Checks if the given topic matches the given subscription filter

        :param subscription_filter: A MQTT subscription filter
        :param topic: A topic
        :return: True if the topic matches the filter
        """
        return paho.topic_matches_sub(subscription_filter, topic)

    @property
    def client_id(self):
        """
        The MQTT client ID
        """
        return self._client_id

    def set_credentials(self, username, password):
        """
        Sets the user name and password to be authenticated on the server

        :param username: Client username
        :param password: Client password
        """
        self.__mqtt.username_pw_set(username, password)

    def set_will(self, topic, payload, qos=0, retain=False):
        """
        Sets up the will message

        :param topic: Topic of the will message
        :param payload: Content of the message
        :param qos: Quality of Service
        :param retain: The message will be retained
        :raise ValueError: Invalid topic
        :raise TypeError: Invalid payload
        """
        self.__mqtt.will_set(topic, payload, qos, retain=retain)

    def connect(self, host="localhost", port=1883, keepalive=60):
        """
        Connects to the MQTT server. The client will automatically try to
        reconnect to this server when the connection is lost.

        :param host: MQTT server host
        :param port: MQTT server port
        :param keepalive: Maximum period in seconds between communications
                          with the broker
        :raise ValueError: Invalid host or port
        """
        # Disconnect first (it also stops the timer)
        self.disconnect()

        # Prepare the connection
        self.__mqtt.connect(host, port, keepalive)

        # Start the MQTT loop
        self.__mqtt.loop_start()

    def disconnect(self):
        """
        Disconnects from the MQTT server
        """
        # Stop the timer
        self.__stop_timer()

        # Unlock all publishers waiting in wait_publication()
        for event in self.__in_flight.values():
            event.set()

        # Disconnect from the server
        self.__mqtt.disconnect()

        # Stop the MQTT loop thread
        # Use a thread to avoid a dead lock in Paho
        thread = threading.Thread(target=self.__mqtt.loop_stop)
        thread.daemon = True
        thread.start()

        # Give it some time
        thread.join(4)

    def publish(self, topic, payload, qos=0, retain=False, wait=False):
        """
        Sends a message through the MQTT connection

        :param topic: Message topic
        :param payload: Message content
        :param qos: Quality of Service
        :param retain: Retain flag
        :param wait: If True, prepares an event to wait for the message to be
                     published
        :return: The local message ID, None on error
        """
        # result is a (result_code, message_id) pair
        result = self.__mqtt.publish(topic, payload, qos, retain)
        if wait and not result[0]:
            # Publish packet sent, wait for it to return
            self.__in_flight[result[1]] = threading.Event()
            _logger.debug("Waiting for publication of %s", topic)
        return result[1]

    def wait_publication(self, mid, timeout=None):
        """
        Wait for a publication to be validated

        :param mid: Local message ID (result of publish)
        :param timeout: Wait timeout (in seconds)
        :return: True if the message was published, False if timeout was
                 raised
        :raise KeyError: Unknown waiting local message ID
        """
        return self.__in_flight[mid].wait(timeout)

    def subscribe(self, topic, qos=0):
        """
        Subscribes to a topic on the server

        :param topic: Topic filter string(s)
        :param qos: Desired quality of service
        :raise ValueError: Invalid topic or QoS
        """
        self.__mqtt.subscribe(topic, qos)

    def unsubscribe(self, topic):
        """
        Unsubscribes from a topic on the server

        :param topic: Topic(s) to unsubscribe from
        :raise ValueError: Invalid topic parameter
        """
        self.__mqtt.unsubscribe(topic)

    def __start_timer(self, delay):
        """
        Starts the reconnection timer

        :param delay: Delay (in seconds) before calling the reconnection
                      method
        """
        self.__timer = threading.Timer(delay, self.__reconnect)
        self.__timer.daemon = True
        self.__timer.start()

    def __stop_timer(self):
        """
        Stops the reconnection timer, if any
        """
        if self.__timer is not None:
            self.__timer.cancel()
            self.__timer = None

    def __reconnect(self):
        """
        Tries to connect to the MQTT server
        """
        # Cancel the timer, if any
        self.__stop_timer()

        try:
            # Try to reconnect the server
            result_code = self.__mqtt.reconnect()
            if result_code:
                # Something wrong happened
                message = "Error connecting the MQTT server: {0} ({1})" \
                    .format(result_code, paho.error_string(result_code))
                _logger.error(message)
                raise ValueError(message)
        except Exception as ex:
            # Something went wrong: log it
            _logger.error("Exception connecting server: %s", ex)
        finally:
            # Prepare a reconnection timer. It will be cancelled by the
            # on_connect callback
            self.__start_timer(10)

    def __on_connect(self, client, userdata, flags, result_code):
        """
        Client connected to the server

        :param client: Connected Paho client
        :param userdata: User data (unused)
        :param flags: Response flags sent by the broker
        :param result_code: Connection result code (0: success, others: error)
        """
        if result_code:
            # result_code != 0: something wrong happened
            _logger.error("Error connecting the MQTT server: %s (%d)",
                          paho.connack_string(result_code), result_code)
        else:
            # Connection is OK: stop the reconnection timer
            self.__stop_timer()

        # Notify the caller, if any
        if self.on_connect is not None:
            try:
                self.on_connect(self, result_code)
            except Exception as ex:
                _logger.exception("Error notifying MQTT listener: %s", ex)

    def __on_disconnect(self, client, userdata, result_code):
        """
        Client has been disconnected from the server

        :param client: Client that received the message
        :param userdata: User data (unused)
        :param result_code: Disconnection reason (0: expected, 1: error)
        """
        if result_code:
            # rc != 0: unexpected disconnection
            _logger.error(
                "Unexpected disconnection from the MQTT server: %s (%d)",
                paho.connack_string(result_code), result_code)

            # Try to reconnect
            self.__stop_timer()
            self.__start_timer(2)

        # Notify the caller, if any
        if self.on_disconnect is not None:
            try:
                self.on_disconnect(self, result_code)
            except Exception as ex:
                _logger.exception("Error notifying MQTT listener: %s", ex)

    def __on_message(self, client, userdata, msg):
        """
        A message has been received from a server

        :param client: Client that received the message
        :param userdata: User data (unused)
        :param msg: A MQTTMessage bean
        """
        # Notify the caller, if any
        if self.on_message is not None:
            try:
                self.on_message(self, msg)
            except Exception as ex:
                _logger.exception("Error notifying MQTT listener: %s", ex)

    def __on_publish(self, client, userdata, mid):
        """
        A message has been published by a server

        :param client: Client that received the message
        :param userdata: User data (unused)
        :param mid: Message ID
        """
        try:
            # Unblock any wait_publication() call for this message
            self.__in_flight[mid].set()
        except KeyError:
            # Publication was not awaited with wait=True: nothing to do
            pass
captsens/clichart | src/test/python/histogramTest.py | 1 | 5666 | #!/usr/bin/env python
"""
Unit tests for histogram.py
TODO:
- Improve tests for calculating histogram
- Add tests for output
"""
import unittest
from StringIO import StringIO
from histogram import *
from statslib import InvalidDataException
# Sample input: four numeric columns, six rows, no header row
CSV_INPUT = """1, 17.2, 55, 120.888
6, 4, 51, 220.888
4, 12.6, 52, 120.888
4, 13.2, 52, 320.888
3, 13.19, 55, 120.888
-1, 12.88, 58, 20.888"""

# Header row prepended by _getInputWithHeader() for the "with header" tests
HEADER = 'column_0, column_1, column_2, column_3\n'

# contain (minValue, maxValue, allValues) for each column
COLUMN_RESULTS = (
    (-1, 6, [-1, 1, 3, 4, 4, 6]),
    (4, 17.2, [17.2, 4, 12.6, 13.2, 13.19, 12.88]),
    (51, 58, [55, 51, 52, 52, 55, 58]),
    (20.888, 320.888, [20.888, 120.888, 120.888, 120.888, 220.888, 320.888]))

# Expected histogram output for column 2: start, end, count, percentage
CSV_OUTPUT = """51, 52, 1, 16.667
52, 53, 2, 33.333
53, 54, 0, 0.000
54, 55, 0, 0.000
55, 56, 2, 33.333
56, 57, 0, 0.000
57, 58, 1, 16.667
"""
# ============================================================================
class HistogramTest(unittest.TestCase):
def testParseData_CsvNoHeader(self):
self._testParseData(CSV_INPUT, True, False)
def testParseData_CsvWithHeader(self):
self._testParseData(self._getInputWithHeader(), True, True)
def testParseData_TextNoHeader(self):
self._testParseData(CSV_INPUT.replace(',', ' '), False, False)
def testParseData_TextWithHeader(self):
self._testParseData(self._getInputWithHeader().replace(',', ' '), False, True)
def testParseData_NonNumeric(self):
testData = CSV_INPUT.replace('12.88', 'xxx')
try:
self._testParseData(testData, True, False)
self.fail()
except InvalidDataException, e:
pass
def testCalculateHistogram_numIntervals(self):
minValue, maxValue, allValues = COLUMN_RESULTS[0]
options = self._buildOptions(numIntervals=7, showPercent=True)
intervals = calculateHistogram(allValues, minValue, maxValue, options)
self._validateHistogram(intervals)
options.cumulative = True
intervals = calculateHistogram(allValues, minValue, maxValue, options)
self._validateHistogram_cumulative(intervals)
def testCalculateHistogram_intervalSize(self):
minValue, maxValue, allValues = COLUMN_RESULTS[0]
options = self._buildOptions(intervalSize=1, showPercent=True)
intervals = calculateHistogram(allValues, minValue, maxValue, options)
self._validateHistogram(intervals)
options.cumulative = True
intervals = calculateHistogram(allValues, minValue, maxValue, options)
self._validateHistogram_cumulative(intervals)
def _buildOptions(self, **kw):
options = Options()
for property, value in kw.items():
setattr(options, property, value)
return options
def _validateHistogram(self, intervals):
self._assertListsAlmostEqual([1, 0, 1, 0, 1, 2, 1], [interval.count for interval in intervals], sortLists = False)
self._assertListsAlmostEqual([-1, 0, 1, 2, 3, 4, 5], [interval.startValue for interval in intervals], sortLists = False)
self._assertListsAlmostEqual([0, 1, 2, 3, 4, 5, 6], [interval.endValue for interval in intervals], sortLists = False)
self._assertListsAlmostEqual([100/6.0, 0, 100/6.0, 0, 100/6.0, 200/6.0, 100/6.0],
[interval.percentage for interval in intervals], sortLists = False)
def _validateHistogram_cumulative(self, intervals):
self._assertListsAlmostEqual([1, 1, 2, 2, 3, 5, 6], [interval.count for interval in intervals], sortLists = False)
self._assertListsAlmostEqual([-1, 0, 1, 2, 3, 4, 5], [interval.startValue for interval in intervals], sortLists = False)
self._assertListsAlmostEqual([0, 1, 2, 3, 4, 5, 6], [interval.endValue for interval in intervals], sortLists = False)
self._assertListsAlmostEqual([100/6.0, 100/6.0, 200/6.0, 200/6.0, 300/6.0, 500/6.0, 600/6.0],
[interval.percentage for interval in intervals], sortLists = False)
def _testParseData(self, testData, isCsv, skipFirst):
options = self._buildOptions(isCsv=isCsv, skipFirst=skipFirst)
for columnIndex in range(4):
options.columnIndex = columnIndex
minValue, maxValue, allValues = parseData(StringIO(testData), options)
self._verifyParseDataResults(minValue, maxValue, allValues, *COLUMN_RESULTS[columnIndex])
def _verifyParseDataResults(self, minValue, maxValue, allValues, expectedMinValue, expectedMaxValue, expectedAllValues):
self.assertAlmostEqual(expectedMinValue, minValue)
self.assertAlmostEqual(expectedMaxValue, maxValue)
if expectedAllValues is not None:
self._assertListsAlmostEqual(expectedAllValues, allValues)
def _getInputWithHeader(self):
return HEADER + CSV_INPUT
def testOutputHistogram(self):
options = self._buildOptions(numIntervals=7, isCsv=True, columnIndex=0, showPercent=True)
outFile = StringIO()
minValue, maxValue, values = COLUMN_RESULTS[2]
outputHistogram(outFile, values, minValue, maxValue, options)
self.assertEqual(CSV_OUTPUT, outFile.getvalue())
def _assertListsAlmostEqual(self, listA, listB, sortLists = True):
    """Assert element-wise approximate equality of two equal-length lists.

    When sortLists is true, both lists are compared in sorted order, so only
    the multiset of values matters; otherwise positions must line up exactly.
    """
    self.assertEqual(len(listA), len(listB))
    if sortLists:
        listA, listB = sorted(listA), sorted(listB)
    for expected, actual in zip(listA, listB):
        self.assertAlmostEqual(expected, actual)
# ============================================================================
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/cms/test_utils/project/emailuserapp/forms.py | 4 | 3574 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from cms.compat import get_user_model
from .models import EmailUser
class UserCreationForm(forms.ModelForm):
    """
    A form for creating a new user, including the required
    email and password fields.
    """

    error_messages = {
        'duplicate_email': "A user with that email already exists.",
        'password_mismatch': "The two password fields didn't match.",
    }

    email = forms.EmailField(
        label='Email',
        help_text="Required. Standard format email address.",
    )
    password1 = forms.CharField(
        label='Password',
        widget=forms.PasswordInput
    )
    password2 = forms.CharField(
        label='Password confirmation',
        widget=forms.PasswordInput,
        help_text="Enter the same password as above, for verification."
    )

    class Meta:
        model = EmailUser
        fields = ('email',)

    def clean_email(self):
        """Reject an email address that already belongs to an existing user."""
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        email = self.cleaned_data["email"]
        User = get_user_model()
        try:
            User._default_manager.get(email=email)
        except User.DoesNotExist:
            # No clash -- the address is available.
            return email
        raise forms.ValidationError(
            self.error_messages['duplicate_email'],
            code='duplicate_email',
        )

    def clean_password2(self):
        """Ensure the two password entries match; return the confirmation value."""
        # check that the two passwords match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2

    def save(self, commit=True):
        """Create the user, storing the password in hashed form."""
        # Save the provided password in hashed format
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """
    A form for updating users, including all fields on the user,
    but replaces the password field with admin's password hash display
    field.
    """

    email = forms.EmailField(
        label='Email',
        help_text = "Required. Standard format email address.",
    )
    password = ReadOnlyPasswordHashField(label="Password",
        help_text="Raw passwords are not stored, so there is no way to see "
                  "this user's password, but you can change the password "
                  "using <a href=\"password/\">this form</a>.")

    class Meta:
        model = EmailUser
        fields = ('email', 'password', 'first_name', 'last_name', 'is_active',
                  'is_staff', 'is_superuser', 'groups', 'user_permissions', 'last_login',
                  'date_joined')

    def __init__(self, *args, **kwargs):
        """Initialize and optimize the user_permissions queryset."""
        super(UserChangeForm, self).__init__(*args, **kwargs)
        f = self.fields.get('user_permissions', None)
        if f is not None:
            # select_related avoids one content_type query per permission row
            # when the choices are rendered.
            f.queryset = f.queryset.select_related('content_type')

    def clean_password(self):
        """
        Regardless of what the user provides, return the initial value.
        This is done here, rather than on the field, because the
        field does not have access to the initial value.
        """
        return self.initial["password"]
| mit |
cstipkovic/spidermonkey-research | testing/web-platform/tests/tools/pytest/testing/test_helpconfig.py | 188 | 2031 | from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
def test_version(testdir, pytestconfig):
    """`pytest --version` exits 0 and reports the version and import origin on stderr."""
    result = testdir.runpytest("--version")
    assert result.ret == 0
    #p = py.path.local(py.__file__).dirpath()
    result.stderr.fnmatch_lines([
        '*pytest*%s*imported from*' % (pytest.__version__, )
    ])
    # When setuptools-distributed plugins are installed, they must be listed too.
    if pytestconfig.pluginmanager.list_plugin_distinfo():
        result.stderr.fnmatch_lines([
            "*setuptools registered plugins:",
            "*at*",
        ])
def test_help(testdir):
    """`pytest --help` exits 0 and mentions the standard options and hint lines."""
    result = testdir.runpytest("--help")
    assert result.ret == 0
    result.stdout.fnmatch_lines("""
        *-v*verbose*
        *setup.cfg*
        *minversion*
        *to see*markers*py.test --markers*
        *to see*fixtures*py.test --fixtures*
    """)
def test_hookvalidation_unknown(testdir):
    """A conftest declaring an unknown hook name must abort the run with an error."""
    testdir.makeconftest("""
        def pytest_hello(xyz):
            pass
    """)
    result = testdir.runpytest()
    assert result.ret != 0
    result.stderr.fnmatch_lines([
        '*unknown hook*pytest_hello*'
    ])
def test_hookvalidation_optional(testdir):
    """An unknown hook marked optionalhook=True must be tolerated silently."""
    testdir.makeconftest("""
        import pytest
        @pytest.hookimpl(optionalhook=True)
        def pytest_hello(xyz):
            pass
    """)
    result = testdir.runpytest()
    # No tests exist, so the run exits with the no-tests-collected code.
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_traceconfig(testdir):
    """--traceconfig prints the pytest/py versions and the active plugin list."""
    result = testdir.runpytest("--traceconfig")
    result.stdout.fnmatch_lines([
        "*using*pytest*py*",
        "*active plugins*",
    ])
def test_debug(testdir, monkeypatch):
    """--debug writes a pytestdebug.log trace containing hook activity."""
    result = testdir.runpytest_subprocess("--debug")
    assert result.ret == EXIT_NOTESTSCOLLECTED
    p = testdir.tmpdir.join("pytestdebug.log")
    assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
    """Setting PYTEST_DEBUG=1 in the environment dumps plugin-manager tracing to stderr."""
    monkeypatch.setenv("PYTEST_DEBUG", "1")
    result = testdir.runpytest_subprocess()
    assert result.ret == EXIT_NOTESTSCOLLECTED
    result.stderr.fnmatch_lines([
        "*pytest_plugin_registered*",
        "*manager*PluginManager*"
    ])
| mpl-2.0 |
arante/pyloc | microblog/flask/lib/python3.5/site-packages/setuptools/site-patch.py | 720 | 2389 | def __boot():
    """Load the real stdlib 'site' module in place of this patch, then re-insert
    PYTHONPATH entries (processed via addsitedir) ahead of system path entries."""
    import sys
    import os
    PYTHONPATH = os.environ.get('PYTHONPATH')
    if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
        PYTHONPATH = []
    else:
        PYTHONPATH = PYTHONPATH.split(os.pathsep)

    pic = getattr(sys,'path_importer_cache',{})
    # Entries after the PYTHONPATH prefix are the "standard" path.
    stdpath = sys.path[len(PYTHONPATH):]
    mydir = os.path.dirname(__file__)
    #print "searching",stdpath,sys.path

    for item in stdpath:
        if item==mydir or not item:
            continue # skip if current dir. on Windows, or my own directory
        importer = pic.get(item)
        if importer is not None:
            loader = importer.find_module('site')
            if loader is not None:
                # This should actually reload the current module
                loader.load_module('site')
                break
        else:
            try:
                import imp # Avoid import loop in Python >= 3.3
                stream, path, descr = imp.find_module('site',[item])
            except ImportError:
                continue
            if stream is None:
                continue
            try:
                # This should actually reload the current module
                imp.load_module('site',stream,path,descr)
            finally:
                stream.close()
            break
    else:
        # for/else: no path entry provided a real 'site' module.
        raise ImportError("Couldn't find the real 'site' module")

    #print "loaded", __file__

    known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp

    oldpos = getattr(sys,'__egginsert',0) # save old insertion position
    sys.__egginsert = 0 # and reset the current one

    for item in PYTHONPATH:
        addsitedir(item)

    sys.__egginsert += oldpos # restore effective old position

    d, nd = makepath(stdpath[0])
    insert_at = None
    new_path = []

    for item in sys.path:
        p, np = makepath(item)

        if np==nd and insert_at is None:
            # We've hit the first 'system' path entry, so added entries go here
            insert_at = len(new_path)

        if np in known_paths or insert_at is None:
            new_path.append(item)
        else:
            # new path after the insert point, back-insert it
            new_path.insert(insert_at, item)
            insert_at += 1

    sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| gpl-3.0 |
ericdill/scikit-xray | skbeam/core/constants/tests/test_xrs.py | 7 | 3697 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 08/19/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_equal
from skbeam.core.constants.xrs import HKL, calibration_standards
from skbeam.core.utils import q_to_d, d_to_q
def smoke_test_powder_standard():
    """Round-trip the Si calibration standard: d<->q conversions and |hkl| must agree."""
    name = 'Si'
    cal = calibration_standards[name]
    assert(name == cal.name)
    for d, hkl, q in cal:
        # d and q must be mutual inverses under the conversion helpers.
        assert_array_almost_equal(d_to_q(d), q)
        assert_array_almost_equal(q_to_d(q), d)
        assert_array_equal(np.linalg.norm(hkl), hkl.length)
    assert_equal(str(cal), "Calibration standard: Si")
    assert_equal(len(cal), 11)
def test_hkl():
    """HKL must normalize int, str and float constructor arguments to equal values."""
    a = HKL(1, 1, 1)
    b = HKL('1', '1', '1')
    c = HKL(h='1', k='1', l='1')
    d = HKL(1.5, 1.5, 1.75)

    assert_equal(a, b)
    assert_equal(a, c)
    # NOTE(review): equality with (1.5, 1.5, 1.75) implies HKL truncates
    # floats to int on construction -- confirm against the HKL definition.
    assert_equal(a, d)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| bsd-3-clause |
shaumux/semc-kernel-qsd8k-jb | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
    # perf-script hook: called once before event processing starts (Python 2 syntax).
    print "Press control+C to stop and show the summary"
def trace_end():
    # perf-script hook: called once after all events are processed.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    """perf hook for every syscall exit: tally failures (ret < 0) per comm/pid/syscall id."""
    # Honor the optional [comm|pid] filter taken from the command line.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # autodict leaf does not exist yet -- first failure of this kind.
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    """Print failed-syscall counts grouped by comm/pid, most frequent errno first (Python 2)."""
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort by count descending (value first in the sort key).
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
kenrick95/airmozilla | airmozilla/new/urls.py | 11 | 1837 | from django.conf.urls import patterns, url
from . import views
# URL routes for the "new event" upload flow; most endpoints back an
# Angular ui-router frontend and live under the api/ prefix.
urlpatterns = patterns(
    '',
    url(r'^(?P<template_name>\w+\.html)$',
        views.partial_template,
        name='partial_template'),
    url(r'^api/yours/$',
        views.your_events,
        name='your_events'),
    url(r'^api/save/$',
        views.save_upload,
        name='save_upload'),
    url(r'^api/videos/$',
        views.videos,
        name='videos'),
    # Per-event endpoints keyed on the numeric event id.
    url(r'^api/(?P<id>\d+)/$',
        views.event_edit,
        name='edit'),
    url(r'^api/(?P<id>\d+)/archive/$',
        views.event_archive,
        name='archive'),
    url(r'^api/(?P<id>\d+)/screencaptures/$',
        views.event_screencaptures,
        name='screencaptures'),
    url(r'^api/(?P<id>\d+)/picture/$',
        views.event_picture,
        name='picture'),
    url(r'^api/(?P<id>\d+)/summary/$',
        views.event_summary,
        name='summary'),
    url(r'^api/(?P<id>\d+)/video/$',
        views.event_video,
        name='video'),
    url(r'^api/(?P<id>\d+)/publish/$',
        views.event_publish,
        name='publish'),
    url(r'^api/(?P<id>\d+)/delete/$',
        views.event_delete,
        name='delete'),
    url(r'^api/(?P<id>\d+)/rotate/pictures/$',
        views.event_pictures_rotate,
        name='pictures_rotate'),
    # Do this to avoid any undefined api calls to be attempted as an
    # html pushstate thing in ui-router.
    url(r'^api/',
        'django.views.defaults.page_not_found'),
    url(r'^vidly/webhook/$',
        views.vidly_media_webhook,
        name='vidly_media_webhook'),
    url(r'unsubscribed/$',
        views.unsubscribed,
        name='unsubscribed'),
    url(r'unsubscribe/(?P<identifier>\w{10})/$',
        views.unsubscribe,
        name='unsubscribe'),
    # lastly
    url(r'',
        views.home,
        name='home'),
)
| bsd-3-clause |
osharim/-heroku-buildpack-python | vendor/distribute-0.6.36/setuptools/tests/test_dist_info.py | 69 | 2480 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import unittest
import textwrap
try:
import ast
except:
pass
import pkg_resources
from setuptools.tests.py26compat import skipIf
def DALS(s):
    """Dedent And Left-Strip: normalize an indented triple-quoted literal."""
    dedented = textwrap.dedent(s)
    return dedented.lstrip()
class TestDistInfo(unittest.TestCase):
    """Exercise pkg_resources discovery of .dist-info style distributions."""

    def test_distinfo(self):
        """Both fixture distributions built in setUp must be discovered with the right versions."""
        dists = {}
        for d in pkg_resources.find_distributions(self.tmpdir):
            dists[d.project_name] = d

        assert len(dists) == 2, dists

        unversioned = dists['UnversionedDistribution']
        versioned = dists['VersionedDistribution']

        assert versioned.version == '2.718' # from filename
        assert unversioned.version == '0.3' # from METADATA

    @skipIf('ast' not in globals(),
            "ast is used to test conditional dependencies (Python >= 2.6)")
    def test_conditional_dependencies(self):
        """The 'baz' extra must add the quux requirement on top of the base splort one."""
        requires = [pkg_resources.Requirement.parse('splort==4'),
                    pkg_resources.Requirement.parse('quux>=1.1')]

        for d in pkg_resources.find_distributions(self.tmpdir):
            self.assertEqual(d.requires(), requires[:1])
            self.assertEqual(d.requires(extras=('baz',)), requires)
            self.assertEqual(d.extras, ['baz'])

    def setUp(self):
        # Build two throwaway .dist-info dirs: one carries its version in the
        # directory name, the other only in its METADATA file.
        self.tmpdir = tempfile.mkdtemp()
        versioned = os.path.join(self.tmpdir,
                                 'VersionedDistribution-2.718.dist-info')
        os.mkdir(versioned)
        metadata_file = open(os.path.join(versioned, 'METADATA'), 'w+')
        metadata_file.write(DALS(
            """
            Metadata-Version: 1.2
            Name: VersionedDistribution
            Requires-Dist: splort (4)
            Provides-Extra: baz
            Requires-Dist: quux (>=1.1); extra == 'baz'
            """))
        metadata_file.close()
        unversioned = os.path.join(self.tmpdir,
                                   'UnversionedDistribution.dist-info')
        os.mkdir(unversioned)
        metadata_file = open(os.path.join(unversioned, 'METADATA'), 'w+')
        metadata_file.write(DALS(
            """
            Metadata-Version: 1.2
            Name: UnversionedDistribution
            Version: 0.3
            Requires-Dist: splort (==4)
            Provides-Extra: baz
            Requires-Dist: quux (>=1.1); extra == 'baz'
            """))
        metadata_file.close()

    def tearDown(self):
        # Remove the fixture tree created in setUp.
        shutil.rmtree(self.tmpdir)
| mit |
tarikkdiry/Flock | flask/lib/python2.7/site-packages/click/_termui_impl.py | 136 | 16395 | """
click._termui_impl
~~~~~~~~~~~~~~~~~~
This module contains implementations for the termui module. To keep the
import time of Click down, some infrequently used functionality is placed
in this module and only imported as needed.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import time
import math
from ._compat import _default_text_stdout, range_type, PY2, isatty, \
open_stream, strip_ansi, term_len, get_best_encoding, WIN
from .utils import echo
from .exceptions import ClickException
if os.name == 'nt':
BEFORE_BAR = '\r'
AFTER_BAR = '\n'
else:
BEFORE_BAR = '\r\033[?25l'
AFTER_BAR = '\033[?25h\n'
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except (AttributeError, TypeError):
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
if hint is NotImplemented or \
not isinstance(hint, (int, long)) or \
hint < 0:
return None
return hint
class ProgressBar(object):
    """Renders a textual progress bar around an iterable or an explicit length.

    Must be used as a context manager; iteration advances the bar one step
    per item. When the output stream is not a terminal, only the label is
    printed once and rendering is skipped.
    """

    def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
                 bar_template='%(bar)s', info_sep=' ', show_eta=True,
                 show_percent=None, show_pos=False, item_show_func=None,
                 label=None, file=None, color=None, width=30):
        self.fill_char = fill_char
        self.empty_char = empty_char
        self.bar_template = bar_template
        self.info_sep = info_sep
        self.show_eta = show_eta
        self.show_percent = show_percent
        self.show_pos = show_pos
        self.item_show_func = item_show_func
        self.label = label or ''
        if file is None:
            file = _default_text_stdout()
        self.file = file
        self.color = color
        self.width = width
        # width == 0 means "fit the terminal width", recomputed on each render.
        self.autowidth = width == 0

        if length is None:
            length = _length_hint(iterable)
        if iterable is None:
            if length is None:
                raise TypeError('iterable or length is required')
            iterable = range_type(length)
        self.iter = iter(iterable)
        self.length = length
        self.length_known = length is not None
        self.pos = 0
        self.avg = []
        self.start = self.last_eta = time.time()
        self.eta_known = False
        self.finished = False
        self.max_width = None
        self.entered = False
        self.current_item = None
        # Non-tty output: render nothing but the label.
        self.is_hidden = not isatty(self.file)
        self._last_line = None

    def __enter__(self):
        self.entered = True
        self.render_progress()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.render_finish()

    def __iter__(self):
        if not self.entered:
            raise RuntimeError('You need to use progress bars in a with block.')
        self.render_progress()
        return self

    def render_finish(self):
        # Restore the cursor and move past the bar line.
        if self.is_hidden:
            return
        self.file.write(AFTER_BAR)
        self.file.flush()

    @property
    def pct(self):
        # Completed fraction in [0.0, 1.0]; clamped, and 1.0 once finished.
        if self.finished:
            return 1.0
        return min(self.pos / (float(self.length) or 1), 1.0)

    @property
    def time_per_iteration(self):
        # Mean of the recent per-step timings collected in make_step().
        if not self.avg:
            return 0.0
        return sum(self.avg) / float(len(self.avg))

    @property
    def eta(self):
        # Estimated seconds remaining; 0.0 when unknown or finished.
        if self.length_known and not self.finished:
            return self.time_per_iteration * (self.length - self.pos)
        return 0.0

    def format_eta(self):
        # Render the ETA as [Dd ]HH:MM:SS, or '' when unknown.
        if self.eta_known:
            t = self.eta + 1
            seconds = t % 60
            t /= 60
            minutes = t % 60
            t /= 60
            hours = t % 24
            t /= 24
            if t > 0:
                days = t
                return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
            else:
                return '%02d:%02d:%02d' % (hours, minutes, seconds)
        return ''

    def format_pos(self):
        # 'pos' or 'pos/length' when the total is known.
        pos = str(self.pos)
        if self.length_known:
            pos += '/%s' % self.length
        return pos

    def format_pct(self):
        # Right-aligned percentage, e.g. ' 42%'.
        return ('% 4d%%' % int(self.pct * 100))[1:]

    def format_progress_line(self):
        show_percent = self.show_percent

        info_bits = []
        if self.length_known:
            bar_length = int(self.pct * self.width)
            bar = self.fill_char * bar_length
            bar += self.empty_char * (self.width - bar_length)
            if show_percent is None:
                show_percent = not self.show_pos
        else:
            if self.finished:
                bar = self.fill_char * self.width
            else:
                # Unknown length: animate a single bouncing fill character.
                bar = list(self.empty_char * (self.width or 1))
                if self.time_per_iteration != 0:
                    bar[int((math.cos(self.pos * self.time_per_iteration)
                             / 2.0 + 0.5) * self.width)] = self.fill_char
                bar = ''.join(bar)

        if self.show_pos:
            info_bits.append(self.format_pos())
        if show_percent:
            info_bits.append(self.format_pct())
        if self.show_eta and self.eta_known and not self.finished:
            info_bits.append(self.format_eta())
        if self.item_show_func is not None:
            item_info = self.item_show_func(self.current_item)
            if item_info is not None:
                info_bits.append(item_info)

        return (self.bar_template % {
            'label': self.label,
            'bar': bar,
            'info': self.info_sep.join(info_bits)
        }).rstrip()

    def render_progress(self):
        from .termui import get_terminal_size
        nl = False

        if self.is_hidden:
            buf = [self.label]
            nl = True
        else:
            buf = []
            # Update width in case the terminal has been resized
            if self.autowidth:
                old_width = self.width
                self.width = 0
                clutter_length = term_len(self.format_progress_line())
                new_width = max(0, get_terminal_size()[0] - clutter_length)
                if new_width < old_width:
                    buf.append(BEFORE_BAR)
                    buf.append(' ' * self.max_width)
                    self.max_width = new_width
                self.width = new_width

            clear_width = self.width
            if self.max_width is not None:
                clear_width = self.max_width

            buf.append(BEFORE_BAR)
            line = self.format_progress_line()
            line_len = term_len(line)
            if self.max_width is None or self.max_width < line_len:
                self.max_width = line_len
            buf.append(line)

            buf.append(' ' * (clear_width - line_len))
        line = ''.join(buf)

        # Render the line only if it changed.
        if line != self._last_line:
            self._last_line = line
            echo(line, file=self.file, color=self.color, nl=nl)
            self.file.flush()

    def make_step(self, n_steps):
        self.pos += n_steps
        if self.length_known and self.pos >= self.length:
            self.finished = True

        # Throttle timing updates to at most once per second.
        if (time.time() - self.last_eta) < 1.0:
            return

        self.last_eta = time.time()
        self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]

        self.eta_known = self.length_known

    def update(self, n_steps):
        self.make_step(n_steps)
        self.render_progress()

    def finish(self):
        self.eta_known = 0
        self.current_item = None
        self.finished = True

    def next(self):
        if self.is_hidden:
            return next(self.iter)
        try:
            rv = next(self.iter)
            self.current_item = rv
        except StopIteration:
            self.finish()
            self.render_progress()
            raise StopIteration()
        else:
            self.update(1)
            return rv

    # Python 3 spells the iterator protocol method __next__.
    if not PY2:
        __next__ = next
        del next
def pager(text, color=None):
    """Decide what method to use for paging through text."""
    stdout = _default_text_stdout()
    # Not a real terminal on either end: just dump the text.
    if not isatty(sys.stdin) or not isatty(stdout):
        return _nullpager(stdout, text, color)
    # $PAGER wins when set; on Windows it must go through a temp file.
    pager_cmd = (os.environ.get('PAGER', None) or '').strip()
    if pager_cmd:
        if WIN:
            return _tempfilepager(text, pager_cmd, color)
        return _pipepager(text, pager_cmd, color)
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return _nullpager(stdout, text, color)
    if WIN or sys.platform.startswith('os2'):
        return _tempfilepager(text, 'more <', color)
    # Probe for `less` by running it with no input.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return _pipepager(text, 'less', color)

    # Last resort: probe `more` against an empty temp file.
    import tempfile
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return _pipepager(text, 'more', color)
        return _nullpager(stdout, text, color)
    finally:
        os.unlink(filename)
def _pipepager(text, cmd, color):
    """Page through text by feeding it to another program. Invoking a
    pager through this might support colors.
    """
    import subprocess
    env = dict(os.environ)

    # If we're piping to less we might support colors under the
    # condition that
    cmd_detail = cmd.rsplit('/', 1)[-1].split()
    if color is None and cmd_detail[0] == 'less':
        less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
        if not less_flags:
            env['LESS'] = '-R'
            color = True
        elif 'r' in less_flags or 'R' in less_flags:
            color = True

    if not color:
        text = strip_ansi(text)

    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         env=env)
    encoding = get_best_encoding(c.stdin)
    try:
        c.stdin.write(text.encode(encoding, 'replace'))
        c.stdin.close()
    except (IOError, KeyboardInterrupt):
        # Pager exited early (e.g. user quit less) -- nothing to do.
        pass

    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
    # search or other commands inside less).
    #
    # That means when the user hits ^C, the parent process (click) terminates,
    # but less is still alive, paging the output and messing up the terminal.
    #
    # If the user wants to make the pager exit on ^C, they should set
    # `LESS='-K'`. It's not our decision to make.
    while True:
        try:
            c.wait()
        except KeyboardInterrupt:
            pass
        else:
            break
def _tempfilepager(text, cmd, color):
    """Page through text by invoking a program on a temporary file."""
    import tempfile
    # NOTE(review): tempfile.mktemp() is race-prone (the name may be claimed
    # by another process before the file is created); mkstemp would be safer.
    filename = tempfile.mktemp()
    if not color:
        text = strip_ansi(text)
    encoding = get_best_encoding(sys.stdout)
    with open_stream(filename, 'wb')[0] as f:
        f.write(text.encode(encoding))
    try:
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def _nullpager(stream, text, color):
    """Fallback pager: write *text* straight to *stream*, stripping ANSI
    escape codes unless color output was requested."""
    output = text if color else strip_ansi(text)
    stream.write(output)
class Editor(object):
    """Launches the user's text editor on a file or an in-memory string
    (the backend for click.edit)."""

    def __init__(self, editor=None, env=None, require_save=True,
                 extension='.txt'):
        self.editor = editor
        self.env = env
        # When require_save is true, edit() returns None if the temp file's
        # mtime is unchanged after editing.
        self.require_save = require_save
        self.extension = extension

    def get_editor(self):
        # Resolution order: explicit editor, $VISUAL, $EDITOR, then
        # platform fallbacks (notepad on Windows; vim/nano/vi elsewhere).
        if self.editor is not None:
            return self.editor
        for key in 'VISUAL', 'EDITOR':
            rv = os.environ.get(key)
            if rv:
                return rv
        if WIN:
            return 'notepad'
        for editor in 'vim', 'nano':
            if os.system('which %s >/dev/null 2>&1' % editor) == 0:
                return editor
        return 'vi'

    def edit_file(self, filename):
        """Open *filename* in the resolved editor and block until it exits.

        Raises ClickException when the editor fails to start or exits nonzero.
        """
        import subprocess
        editor = self.get_editor()
        if self.env:
            environ = os.environ.copy()
            environ.update(self.env)
        else:
            environ = None
        try:
            c = subprocess.Popen('%s "%s"' % (editor, filename),
                                 env=environ, shell=True)
            exit_code = c.wait()
            if exit_code != 0:
                raise ClickException('%s: Editing failed!' % editor)
        except OSError as e:
            raise ClickException('%s: Editing failed: %s' % (editor, e))

    def edit(self, text):
        """Edit *text* via a temp file; return the edited text, or None when
        require_save is set and the user saved nothing."""
        import tempfile

        text = text or ''
        if text and not text.endswith('\n'):
            text += '\n'

        fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
        try:
            if WIN:
                # BOM + CRLF so Windows editors (notepad) handle it correctly.
                encoding = 'utf-8-sig'
                text = text.replace('\n', '\r\n')
            else:
                encoding = 'utf-8'
            text = text.encode(encoding)

            f = os.fdopen(fd, 'wb')
            f.write(text)
            f.close()
            timestamp = os.path.getmtime(name)

            self.edit_file(name)

            if self.require_save \
               and os.path.getmtime(name) == timestamp:
                return None

            f = open(name, 'rb')
            try:
                rv = f.read()
            finally:
                f.close()
            return rv.decode('utf-8-sig').replace('\r\n', '\n')
        finally:
            os.unlink(name)
def open_url(url, wait=False, locate=False):
    """Open *url* with the platform's default handler (click.launch backend).

    wait blocks until the launched program exits; locate opens the file
    manager with the target selected instead of launching the file itself.
    Returns the launcher's exit code (0 on best-effort success, 1 on failure).
    """
    import subprocess

    def _unquote_file(url):
        # NOTE(review): both branches import the same module -- this looks
        # like a mangled py2/py3 compat shim (urllib vs urllib.parse); as
        # written the try/except is a no-op. urllib.unquote is py2-only.
        try:
            import urllib
        except ImportError:
            import urllib
        if url.startswith('file://'):
            url = urllib.unquote(url[7:])
        return url

    if sys.platform == 'darwin':
        # macOS: delegate to `open` (-W waits, -R reveals in Finder).
        args = ['open']
        if wait:
            args.append('-W')
        if locate:
            args.append('-R')
        args.append(_unquote_file(url))
        null = open('/dev/null', 'w')
        try:
            return subprocess.Popen(args, stderr=null).wait()
        finally:
            null.close()
    elif WIN:
        if locate:
            url = _unquote_file(url)
            args = 'explorer /select,"%s"' % _unquote_file(
                url.replace('"', ''))
        else:
            args = 'start %s "" "%s"' % (
                wait and '/WAIT' or '', url.replace('"', ''))
        return os.system(args)

    try:
        if locate:
            url = os.path.dirname(_unquote_file(url)) or '.'
        else:
            url = _unquote_file(url)
        c = subprocess.Popen(['xdg-open', url])
        if wait:
            return c.wait()
        return 0
    except OSError:
        # xdg-open missing: fall back to the webbrowser module for http(s).
        if url.startswith(('http://', 'https://')) and not locate and not wait:
            import webbrowser
            webbrowser.open(url)
            return 0
        return 1
def _translate_ch_to_exc(ch):
if ch == '\x03':
raise KeyboardInterrupt()
if ch == '\x04':
raise EOFError()
# Platform-specific single-keypress reader used by click.getchar().
if WIN:
    import msvcrt

    def getchar(echo):
        # Blocking single-key read via the Windows CRT console API.
        rv = msvcrt.getch()
        if echo:
            msvcrt.putchar(rv)
        _translate_ch_to_exc(rv)
        if PY2:
            # getch() returns bytes on py2; decode to the console encoding.
            enc = getattr(sys.stdin, 'encoding', None)
            if enc is not None:
                rv = rv.decode(enc, 'replace')
            else:
                rv = rv.decode('cp1252', 'replace')
        return rv
else:
    import tty
    import termios

    def getchar(echo):
        # Read straight from the controlling tty when stdin is redirected.
        if not isatty(sys.stdin):
            f = open('/dev/tty')
            fd = f.fileno()
        else:
            fd = sys.stdin.fileno()
            f = None
        try:
            old_settings = termios.tcgetattr(fd)
            try:
                # Raw mode so a single keypress is delivered without Enter.
                tty.setraw(fd)
                ch = os.read(fd, 32)
                if echo and isatty(sys.stdout):
                    sys.stdout.write(ch)
            finally:
                # Always restore the terminal settings.
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
                sys.stdout.flush()
                if f is not None:
                    f.close()
        except termios.error:
            pass
        # NOTE(review): if tcgetattr() above raises termios.error, `ch` is
        # never assigned and the next line raises NameError -- confirm intended.
        _translate_ch_to_exc(ch)
        return ch.decode(get_best_encoding(sys.stdin), 'replace')
| bsd-3-clause |
technicalpickles/zulip | zerver/lib/timeout.py | 115 | 3045 | from __future__ import absolute_import
import sys
import time
import ctypes
import threading
# Based on http://code.activestate.com/recipes/483752/
class TimeoutExpired(Exception):
    """Raised when a function call exceeds its time budget."""

    def __str__(self):
        return 'Function call timed out.'
def timeout(timeout, func, *args, **kwargs):
    '''Call the function in a separate thread.
    Return its return value, or raise an exception,
    within approximately 'timeout' seconds.

    The function may receive a TimeoutExpired exception
    anywhere in its code, which could have arbitrary
    unsafe effects (resources not released, etc.).

    It might also fail to receive the exception and
    keep running in the background even though
    timeout() has returned.

    This may also fail to interrupt functions which are
    stuck in a long-running primitive interpreter
    operation.'''

    # NOTE(review): Python 2-only code -- uses xrange, thread.isAlive()
    # (removed in Python 3.9) and the three-expression raise statement.

    class TimeoutThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = None
            self.exc_info = None

            # Don't block the whole program from exiting
            # if this is the only thread left.
            self.daemon = True

        def run(self):
            # Capture either the return value or the full exception info
            # so the caller's thread can re-raise it faithfully.
            try:
                self.result = func(*args, **kwargs)
            except BaseException:
                self.exc_info = sys.exc_info()

        def raise_async_timeout(self):
            # Called from another thread.
            # Attempt to raise a TimeoutExpired in the thread represented by 'self'.
            tid = ctypes.c_long(self.ident)
            result = ctypes.pythonapi.PyThreadState_SetAsyncExc(
                tid, ctypes.py_object(TimeoutExpired))
            if result > 1:
                # "if it returns a number greater than one, you're in trouble,
                # and you should call it again with exc=NULL to revert the effect"
                #
                # I was unable to find the actual source of this quote, but it
                # appears in the many projects across the Internet that have
                # copy-pasted this recipe.
                ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)

    thread = TimeoutThread()
    thread.start()
    thread.join(timeout)

    if thread.isAlive():
        # Gamely try to kill the thread, following the dodgy approach from
        # http://stackoverflow.com/a/325528/90777
        #
        # We need to retry, because an async exception received while the
        # thread is in a system call is simply ignored.
        for i in xrange(10):
            thread.raise_async_timeout()
            time.sleep(0.1)
            if not thread.isAlive():
                break
        raise TimeoutExpired

    if thread.exc_info:
        # Raise the original stack trace so our error messages are more useful.
        # from http://stackoverflow.com/a/4785766/90777
        raise thread.exc_info[0], thread.exc_info[1], thread.exc_info[2]
    return thread.result
| apache-2.0 |
zzhhui/PJSip-CSharp | tests/automated/run_continuous.py | 100 | 3442 | #!/usr/bin/python
import os
import sys
import time
import datetime
import ccdash
INTERVAL = 300
DELAY = 0
ONCE = False
SUFFIX = ""
FORCE = False
def run_scenarios(scenarios, group):
    """Run every CCDash scenario under *group*; return the first nonzero
    exit code encountered, or 0 when all scenarios succeed."""
    rc = 0
    for scenario in scenarios:
        argv = ["ccdash.py", "scenario", scenario, "--group", group]
        thisrc = ccdash.main(argv)
        if rc == 0 and thisrc:
            # Remember only the first failure; keep running the rest.
            rc = thisrc
    return rc
def usage():
    """Print the command-line help text and exit with status 1."""
    print """Periodically monitor working directory for Continuous and Nightly builds
Usage:
  run_continuous.py [options] scenario1.xml [scenario2.xml ...]
options:
  These are options which will be processed by run_continuous.py:
  --delay MIN       Delay both Continuous and Nightly builds by MIN minutes.
                    This is useful to coordinate the build with other build
                    machines. By default, Continuous build will be done right
                    after changes are detected, and Nightly build will be done
                    at 00:00 GMT. MIN is a float number.
  --once            Just run one loop to see if anything needs to be done and
                    if so just do it once. Quit after that.
  --suffix SFX      Set group suffix to SFX. For example, if SFX is "-2.x", then
                    tests will be submitted to "Nightly-2.x", "Continuous-2.x",
                    and "Experimental-2.x"
  --force           Force running the test even when nothing has changed.
"""
    sys.exit(1)
if __name__ == "__main__":
if len(sys.argv)<=1 or sys.argv[1]=="-h" or sys.argv[1]=="--h" or sys.argv[1]=="--help" or sys.argv[1]=="/h":
usage()
# Splice list
scenarios = []
i = 1
while i < len(sys.argv):
if sys.argv[i]=="--delay":
i = i + 1
if i >= len(sys.argv):
print "Error: missing argument"
sys.exit(1)
DELAY = float(sys.argv[i]) * 60
print "Delay is set to %f minute(s)" % (DELAY / 60)
elif sys.argv[i]=="--suffix":
i = i + 1
if i >= len(sys.argv):
print "Error: missing argument"
sys.exit(1)
SUFFIX = sys.argv[i]
print "Suffix is set to %s" % (SUFFIX)
elif sys.argv[i]=="--once":
ONCE = True
elif sys.argv[i]=="--force":
FORCE = True
else:
# Check if scenario exists
scenario = sys.argv[i]
if not os.path.exists(scenario):
print "Error: file " + scenario + " does not exist"
sys.exit(1)
scenarios.append(scenario)
print "Scenario %s added" % (scenario)
i = i + 1
if len(scenarios) < 1:
print "Error: scenario is required"
sys.exit(1)
# Current date
utc = time.gmtime(None)
day = utc.tm_mday
# Loop foreva
while True:
argv = []
# Anything changed recently?
argv.append("ccdash.py")
argv.append("status")
argv.append("-w")
argv.append("../..")
rc = ccdash.main(argv)
utc = time.gmtime(None)
if utc.tm_mday != day or rc != 0 or FORCE:
group = ""
if utc.tm_mday != day:
day = utc.tm_mday
group = "Nightly" + SUFFIX
elif rc != 0:
group = "Continuous" + SUFFIX
else:
group = "Experimental" + SUFFIX
if DELAY > 0:
print "Will run %s after %f s.." % (group, DELAY)
time.sleep(DELAY)
rc = run_scenarios(scenarios, group)
msg = str(datetime.datetime.now()) + \
": done running " + group + \
"tests, will check again in " + str(INTERVAL) + "s.."
if ONCE:
sys.exit(0)
else:
# Nothing changed
msg = str(datetime.datetime.now()) + \
": No update, will check again in " + str(INTERVAL) + "s.."
if ONCE:
sys.exit(1)
print msg
time.sleep(INTERVAL)
| gpl-2.0 |
cmdblock/pmip6ns3.13new | src/tools/bindings/modulegen__gcc_ILP32.py | 32 | 172830 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Pybindgen error handler that downgrades wrapper failures to warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Report the failing wrapper instead of aborting generation;
        # returning True tells pybindgen the error was handled.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
# Install the lenient handler globally so binding generation keeps going
# past individual wrapper failures.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns.tools bindings."""
    return Module('ns.tools', cpp_namespace='::ns3')
def register_types(module):
    """Register every wrapped C++ type with the ns.tools pybindgen module.

    Generated by the ns-3 API scanner — do not reorder by hand: later
    add_class() calls reference earlier ones through root_module[...] (e.g.
    as parent= or outer_class=), so declaration order is significant.
    """
    root_module = module.get_root()
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation [class]
    module.add_class('DelayJitterEstimation')
    ## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector [class]
    module.add_class('EventGarbageCollector')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## gnuplot.h (module 'tools'): ns3::Gnuplot [class]
    module.add_class('Gnuplot')
    ## gnuplot.h (module 'tools'): ns3::GnuplotCollection [class]
    module.add_class('GnuplotCollection')
    ## gnuplot.h (module 'tools'): ns3::GnuplotDataset [class]
    module.add_class('GnuplotDataset')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset [class]
    module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Style [enumeration]
    module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::ErrorBars [enumeration]
    module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction [class]
    module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset [class]
    module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction [class]
    module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (none wrapped here)."""
    # Generated stub: no types are registered; the local binding is unused.
    root_module = module.get_root()
def register_methods(root_module):
    """Dispatch to the per-class method-registration helpers.

    Generated by the ns-3 API scanner; one call per class registered in
    register_types(), in the same order.
    """
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DelayJitterEstimation_methods(root_module, root_module['ns3::DelayJitterEstimation'])
    register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
    register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
    register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
    register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
    register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
    register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and methods of ns3::AttributeConstructionList (generated)."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and attributes of ns3::AttributeConstructionList::Item (generated)."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register constructors and methods of ns3::Buffer (generated)."""
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'bool',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'bool',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy',
                   'ns3::Buffer',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Bind the ns3::Buffer::Iterator API (buffer.h, module 'network') onto *cls*.

    All type strings are the exact C++ signatures pybindgen expects;
    registration order mirrors the original generated output.
    """
    # Constructors: copy constructor, then default constructor.
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    cls.add_constructor([])
    # IP checksum helpers (two overloads).
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    # Position/size queries.
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    cls.add_method('IsStart', 'bool', [], is_const=True)
    # Cursor movement, with and without an explicit delta.
    cls.add_method('Next', 'void', [])
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    cls.add_method('Prev', 'void', [])
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    # Bulk and typed reads.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    cls.add_method('ReadU16', 'uint16_t', [])
    cls.add_method('ReadU32', 'uint32_t', [])
    cls.add_method('ReadU64', 'uint64_t', [])
    cls.add_method('ReadU8', 'uint8_t', [])
    # Bulk and typed writes.
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Bind the ns3::ByteTagIterator API (packet.h, module 'network') onto *cls*."""
    # Copy constructor only; no default constructor is exposed.
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Bind the ns3::ByteTagIterator::Item API (packet.h, module 'network') onto *cls*."""
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    # Accessors are all const member functions.
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Bind the ns3::ByteTagList API (byte-tag-list.h, module 'network') onto *cls*."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    # Two Add overloads: one returning a TagBuffer, one merging another list.
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Bind the ns3::ByteTagList::Iterator API (byte-tag-list.h, module 'network') onto *cls*."""
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Bind the ns3::ByteTagList::Iterator::Item API (byte-tag-list.h, module 'network') onto *cls*."""
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    # Public C++ data members exposed as mutable Python instance attributes.
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Bind the ns3::CallbackBase API (callback.h, module 'core') onto *cls*."""
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # Protected members are registered too, so generated subclasses can reach them.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return
def register_Ns3DelayJitterEstimation_methods(root_module, cls):
    """Bind the ns3::DelayJitterEstimation API (delay-jitter-estimation.h, module 'tools') onto *cls*."""
    cls.add_constructor([param('ns3::DelayJitterEstimation const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetLastDelay', 'ns3::Time', [], is_const=True)
    cls.add_method('GetLastJitter', 'uint64_t', [], is_const=True)
    # PrepareTx is static; RecordRx operates on the instance.
    cls.add_method('PrepareTx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')], is_static=True)
    cls.add_method('RecordRx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    return
def register_Ns3EventGarbageCollector_methods(root_module, cls):
    """Bind the ns3::EventGarbageCollector API (event-garbage-collector.h, module 'tools') onto *cls*."""
    cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Track', 'void', [param('ns3::EventId', 'event')])
    return
def register_Ns3EventId_methods(root_module, cls):
    """Bind the ns3::EventId API (event-id.h, module 'core') onto *cls*."""
    # Comparison operators are registered before constructors/methods.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    cls.add_method('Cancel', 'void', [])
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return
def register_Ns3Gnuplot_methods(root_module, cls):
    """Bind the ns3::Gnuplot API (gnuplot.h, module 'tools') onto *cls*."""
    cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
    # Main constructor: both arguments default to empty strings on the C++ side.
    cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'), param('std::string const &', 'title', default_value='""')])
    cls.add_method('AddDataset', 'void', [param('ns3::GnuplotDataset const &', 'dataset')])
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('DetectTerminal', 'std::string', [param('std::string const &', 'filename')], is_static=True)
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetLegend', 'void', [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    return
def register_Ns3GnuplotCollection_methods(root_module, cls):
    """Bind the ns3::GnuplotCollection API (gnuplot.h, module 'tools') onto *cls*."""
    cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFilename')])
    cls.add_method('AddPlot', 'void', [param('ns3::Gnuplot const &', 'plot')])
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')], is_const=True)
    # Returns a reference to the stored plot.
    cls.add_method('GetPlot', 'ns3::Gnuplot &', [param('unsigned int', 'id')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    return
def register_Ns3GnuplotDataset_methods(root_module, cls):
    """Bind the ns3::GnuplotDataset API (gnuplot.h, module 'tools') onto *cls*."""
    cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')])
    cls.add_method('SetDefaultExtra', 'void', [param('std::string const &', 'extra')], is_static=True)
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    # Protected constructor taking the implementation Data pointer.
    cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')], visibility='protected')
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Bind the ns3::ObjectBase API (object-base.h, module 'core') onto *cls*."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # Attribute access (throwing and fail-safe variants).
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    # Pure virtual in C++, so flagged for pybindgen accordingly.
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # Trace source connect/disconnect, with and without a context string.
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # Protected construction hooks.
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Bind the ns3::ObjectDeleter API (object.h, module 'core') onto *cls*."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Bind the ns3::ObjectFactory API (object-factory.h, module 'core') onto *cls*."""
    # operator<< support is registered first.
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId, C string, std::string.
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Bind the ns3::PacketMetadata API (packet-metadata.h, module 'network') onto *cls*."""
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    # Mutators that grow the metadata record.
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    # Iteration and fragment creation.
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Static switches enabling metadata tracking/checking globally.
    cls.add_method('Enable', 'void', [], is_static=True)
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    # Mutators that shrink the metadata record.
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Bind the ns3::PacketMetadata::Item API (packet-metadata.h, module 'network') onto *cls*."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    # Public C++ data members exposed as mutable Python instance attributes.
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::ItemIterator
    (constructors plus the HasNext/Next iteration protocol).
    Generated pybindgen registration code."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketMetadata::Item',
                   [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator (copy constructor
    plus the HasNext/Next iteration protocol). Generated pybindgen
    registration code."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketTagIterator::Item',
                   [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator::Item (copy
    constructor, GetTag and GetTypeId accessors). Generated pybindgen
    registration code."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList (constructors and the
    Add/Head/Peek/Remove/RemoveAll tag-list operations). Generated pybindgen
    registration code — each call mirrors the declaration quoted in the
    ``##`` comment above it."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList::TagData (constructors
    and the count/data/next/tid data members of the intrusive list node).
    Generated pybindgen registration code."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for the SimpleRefCount<Object, ObjectBase,
    ObjectDeleter> template instantiation (constructors plus the static
    Cleanup hook). Generated pybindgen registration code."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register Python bindings for the ns3::Simulator static API (event
    scheduling/cancellation, simulation clock queries, implementation and
    scheduler configuration, and Stop overloads). Generated pybindgen
    registration code — ``deprecated=True`` entries mirror deprecation
    markers in simulator.h."""
    ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
    cls.add_method('Cancel',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
    cls.add_method('Destroy',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
    cls.add_method('GetDelayLeft',
                   'ns3::Time',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
    cls.add_method('GetImplementation',
                   'ns3::Ptr< ns3::SimulatorImpl >',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
    cls.add_method('GetMaximumSimulationTime',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
    cls.add_method('IsFinished',
                   'bool',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Time',
                   [],
                   is_static=True, deprecated=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
    cls.add_method('Now',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
    cls.add_method('Remove',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::RunOneEvent() [member function]
    cls.add_method('RunOneEvent',
                   'void',
                   [],
                   is_static=True, deprecated=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
    cls.add_method('SetImplementation',
                   'void',
                   [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
    cls.add_method('SetScheduler',
                   'void',
                   [param('ns3::ObjectFactory', 'schedulerFactory')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
    cls.add_method('Stop',
                   'void',
                   [param('ns3::Time const &', 'time')],
                   is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Tag base class
    (constructors, TypeId lookup, and the pure-virtual
    Serialize/Deserialize/Print/GetSerializedSize interface that concrete
    tags override). Generated pybindgen registration code."""
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer (constructors, CopyFrom,
    TrimAtEnd, and the fixed-width Read*/Write* accessors used by tag
    serialization). Generated pybindgen registration code — each call
    mirrors the declaration quoted in the ``##`` comment above it."""
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom',
                   'void',
                   [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble',
                   'double',
                   [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd',
                   'void',
                   [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble',
                   'void',
                   [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId: comparison/stream operators,
    constructors, attribute and trace-source registration (AddAttribute,
    AddTraceSource), metadata queries (GetName, GetParent, GetUid, ...),
    static registry lookups (GetRegistered, LookupByName), and configuration
    setters (SetParent, SetGroupName, SetUid). Generated pybindgen
    registration code — each call mirrors the declaration quoted in the
    ``##`` comment above it."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId::AttributeInformation
    (constructors and the accessor/checker/flags/help/initialValue/name/
    originalInitialValue record fields). Generated pybindgen registration
    code."""
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId::TraceSourceInformation
    (constructors and the accessor/help/name record fields). Generated
    pybindgen registration code."""
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for the ns3::empty placeholder type (default
    and copy constructors only). Generated pybindgen registration code."""
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register Python bindings for ns3::int64x64_t: binary numeric operators
    (* + - /) against every native integer width plus double and int64x64_t
    itself, unary negation, comparison and in-place operators, numeric
    constructors, and the GetDouble/GetHigh/GetLow/Invert/MulByInvert
    methods. Generated pybindgen registration code — one operator
    registration per overloaded right-hand-side type."""
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow',
                   'uint64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert',
                   'ns3::int64x64_t',
                   [param('uint64_t', 'v')],
                   is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert',
                   'void',
                   [param('ns3::int64x64_t const &', 'o')])
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register Python bindings for ns3::Chunk (abstract base of Header/Trailer).

    Auto-generated by the ns-3 API scanner; registration order matters for
    pybindgen overload resolution, so do not reorder these calls by hand.
    """
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Gnuplot2dDataset_methods(root_module, cls):
    """Register Python bindings for ns3::Gnuplot2dDataset (tools module).

    Auto-generated; exposes the Add overloads (plain point, single error
    delta, x/y error deltas), style and error-bar setters, both per-instance
    and as static defaults.
    """
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(ns3::Gnuplot2dDataset const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Gnuplot2dDataset const &', 'arg0')])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(std::string const & title="Untitled") [constructor]
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y, double errorDelta) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y, double xErrorDelta, double yErrorDelta) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'xErrorDelta'), param('double', 'yErrorDelta')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::AddEmptyLine() [member function]
    cls.add_method('AddEmptyLine',
                   'void',
                   [])
    ## gnuplot.h (module 'tools'): static void ns3::Gnuplot2dDataset::SetDefaultErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function]
    cls.add_method('SetDefaultErrorBars',
                   'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')],
                   is_static=True)
    ## gnuplot.h (module 'tools'): static void ns3::Gnuplot2dDataset::SetDefaultStyle(ns3::Gnuplot2dDataset::Style style) [member function]
    cls.add_method('SetDefaultStyle',
                   'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')],
                   is_static=True)
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::SetErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function]
    cls.add_method('SetErrorBars',
                   'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::SetStyle(ns3::Gnuplot2dDataset::Style style) [member function]
    cls.add_method('SetStyle',
                   'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')])
    return
def register_Ns3Gnuplot2dFunction_methods(root_module, cls):
    """Register Python bindings for ns3::Gnuplot2dFunction (tools module)."""
    # Copy constructor: Gnuplot2dFunction(Gnuplot2dFunction const & arg0).
    copy_ctor = [param('ns3::Gnuplot2dFunction const &', 'arg0')]
    cls.add_constructor(copy_ctor)
    # Constructor with defaulted title/function strings.
    title_param = param('std::string const &', 'title', default_value='"Untitled"')
    function_param = param('std::string const &', 'function', default_value='""')
    cls.add_constructor([title_param, function_param])
    # void SetFunction(std::string const & function).
    cls.add_method('SetFunction',
                   'void',
                   [param('std::string const &', 'function')])
    return
def register_Ns3Gnuplot3dDataset_methods(root_module, cls):
    """Register Python bindings for ns3::Gnuplot3dDataset (tools module).

    Auto-generated; exposes 3-D point insertion plus per-instance and static
    default style setters (3-D styles are plain strings, unlike the 2-D enum).
    """
    ## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(ns3::Gnuplot3dDataset const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Gnuplot3dDataset const &', 'arg0')])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(std::string const & title="Untitled") [constructor]
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::Add(double x, double y, double z) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'z')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::AddEmptyLine() [member function]
    cls.add_method('AddEmptyLine',
                   'void',
                   [])
    ## gnuplot.h (module 'tools'): static void ns3::Gnuplot3dDataset::SetDefaultStyle(std::string const & style) [member function]
    cls.add_method('SetDefaultStyle',
                   'void',
                   [param('std::string const &', 'style')],
                   is_static=True)
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::SetStyle(std::string const & style) [member function]
    cls.add_method('SetStyle',
                   'void',
                   [param('std::string const &', 'style')])
    return
def register_Ns3Gnuplot3dFunction_methods(root_module, cls):
    """Register Python bindings for ns3::Gnuplot3dFunction (tools module)."""
    # Copy constructor: Gnuplot3dFunction(Gnuplot3dFunction const & arg0).
    copy_ctor = [param('ns3::Gnuplot3dFunction const &', 'arg0')]
    cls.add_constructor(copy_ctor)
    # Constructor with defaulted title/function strings.
    title_param = param('std::string const &', 'title', default_value='"Untitled"')
    function_param = param('std::string const &', 'function', default_value='""')
    cls.add_constructor([title_param, function_param])
    # void SetFunction(std::string const & function).
    cls.add_method('SetFunction',
                   'void',
                   [param('std::string const &', 'function')])
    return
def register_Ns3Header_methods(root_module, cls):
    """Register Python bindings for ns3::Header (abstract packet header base).

    Auto-generated; all serialization methods are pure virtual and must be
    overridden by concrete header subclasses.
    """
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for ns3::Object (core aggregation/lifecycle base).

    Auto-generated; note that the copy constructor and the Do*/Notify* hooks
    are registered with protected visibility, mirroring the C++ class.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for the nested ns3::Object::AggregateIterator.

    Auto-generated; provides the HasNext()/Next() pair used to walk the set
    of objects aggregated to an ns3::Object.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeAccessor> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeChecker> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeValue> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<CallbackImplBase> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<EventImpl> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<NixVector> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<Packet> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<TraceSourceAccessor> (auto-generated template instantiation)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register Python bindings for ns3::Time (core simulation time type).

    Auto-generated; binds arithmetic/comparison operators, the numeric and
    string constructors, unit conversions (To/From*), and Get*/Is* accessors.
    Registration order matters for pybindgen overload resolution.
    """
    # Operator bindings: +, -, in-place +=/-=, stream output, comparisons.
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::TraceSourceAccessor.

    Auto-generated; all Connect/Disconnect variants are pure virtual and take
    the target ObjectBase pointer without transferring ownership to C++.
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register Python bindings for ns3::Trailer (abstract packet trailer base).

    Auto-generated; mirrors the Header registration except Deserialize takes
    an 'end' iterator (trailers are parsed from the back of the buffer).
    """
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeAccessor (attribute get/set interface).

    Auto-generated; all four methods are pure virtual hooks implemented by
    concrete accessors in the attribute system.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeChecker (attribute validation interface).

    Auto-generated; Check/Copy/Create and the type-information queries are
    pure virtual, while CreateValidValue is a concrete convenience method.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeValue (attribute value base class).

    Auto-generated; Copy and the string (de)serialization methods are pure
    virtual and implemented by each concrete value type.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker constructors on *cls* (generated pybindgen code)."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase constructors and methods on *cls* (generated pybindgen code)."""
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue constructors and methods on *cls* (generated pybindgen code)."""
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue bindings on *cls*.

    Machine-generated pybindgen code; note the virtual methods are exposed
    with private visibility, mirroring the C++ declaration.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register ns3::EventImpl constructors and methods on *cls* (generated pybindgen code)."""
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke',
                   'void',
                   [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled',
                   'bool',
                   [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify',
                   'void',
                   [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register ns3::NixVector constructors and methods on *cls* (generated pybindgen code)."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex',
                   'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount',
                   'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')],
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex',
                   'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits',
                   'uint32_t',
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker constructors on *cls* (generated pybindgen code)."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue constructors and methods on *cls* (generated pybindgen code)."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::ObjectFactory',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register ns3::Packet constructors and methods on *cls*.

    Machine-generated pybindgen code; statement order matters, do not reorder.
    """
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   deprecated=True, is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register ns3::TimeChecker constructors on *cls* (generated pybindgen code)."""
    ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue constructors and methods on *cls* (generated pybindgen code)."""
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker constructors on *cls* (generated pybindgen code)."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue constructors and methods on *cls* (generated pybindgen code)."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_functions(root_module):
    """Register free functions, recursing into the FatalImpl submodule."""
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register in the ns3::FatalImpl submodule."""
    return
def main():
    """Build the pybindgen module tree and write the generated bindings to stdout.

    The registration steps must run in this order: types first, then methods,
    then free functions, before generation.
    """
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
| gpl-2.0 |
janslow/boto | tests/unit/s3/test_website.py | 114 | 9219 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
import xml.dom.minidom
import xml.sax
from boto.s3.website import WebsiteConfiguration
from boto.s3.website import RedirectLocation
from boto.s3.website import RoutingRules
from boto.s3.website import Condition
from boto.s3.website import RoutingRules
from boto.s3.website import RoutingRule
from boto.s3.website import Redirect
from boto import handler
def pretty_print_xml(text):
    """Normalize *text* by stripping per-line whitespace, then re-indent it.

    Collapsing the lines first makes the pretty-printed output independent of
    how the input XML was originally indented.
    """
    collapsed = ''.join(map(str.strip, text.splitlines()))
    document = xml.dom.minidom.parseString(collapsed)
    return document.toprettyxml()
class TestS3WebsiteConfiguration(unittest.TestCase):
    """Unit tests for serializing/deserializing S3 website configuration XML."""
    # Show full diffs on assertion failure -- the XML strings are long.
    maxDiff = None
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_suffix_only(self):
        """Only an <IndexDocument> element is emitted when just a suffix is set."""
        config = WebsiteConfiguration(suffix='index.html')
        xml = config.to_xml()
        self.assertIn(
            '<IndexDocument><Suffix>index.html</Suffix></IndexDocument>', xml)
    def test_suffix_and_error(self):
        """An <ErrorDocument> element is emitted when an error key is set."""
        config = WebsiteConfiguration(suffix='index.html',
                                      error_key='error.html')
        xml = config.to_xml()
        self.assertIn(
            '<ErrorDocument><Key>error.html</Key></ErrorDocument>', xml)
    def test_redirect_all_request_to_with_just_host(self):
        """RedirectAllRequestsTo with only a hostname emits just <HostName>."""
        location = RedirectLocation(hostname='example.com')
        config = WebsiteConfiguration(redirect_all_requests_to=location)
        xml = config.to_xml()
        self.assertIn(
            ('<RedirectAllRequestsTo><HostName>'
             'example.com</HostName></RedirectAllRequestsTo>'), xml)
    def test_redirect_all_requests_with_protocol(self):
        """A <Protocol> element is included when the redirect specifies one."""
        location = RedirectLocation(hostname='example.com', protocol='https')
        config = WebsiteConfiguration(redirect_all_requests_to=location)
        xml = config.to_xml()
        self.assertIn(
            ('<RedirectAllRequestsTo><HostName>'
             'example.com</HostName><Protocol>https</Protocol>'
             '</RedirectAllRequestsTo>'), xml)
    def test_routing_rules_key_prefix(self):
        """A key-prefix routing rule serializes to the documented XML shape."""
        x = pretty_print_xml
        # This rule redirects requests for docs/* to documentation/*
        rules = RoutingRules()
        condition = Condition(key_prefix='docs/')
        redirect = Redirect(replace_key_prefix='documents/')
        rules.add_rule(RoutingRule(condition, redirect))
        config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
        xml = config.to_xml()
        expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
        <IndexDocument>
          <Suffix>index.html</Suffix>
        </IndexDocument>
        <RoutingRules>
        <RoutingRule>
        <Condition>
          <KeyPrefixEquals>docs/</KeyPrefixEquals>
        </Condition>
        <Redirect>
          <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
        </Redirect>
        </RoutingRule>
        </RoutingRules>
        </WebsiteConfiguration>
        """
        self.assertEqual(x(expected_xml), x(xml))
    def test_routing_rules_to_host_on_404(self):
        """An HTTP-404 routing rule serializes HostName plus key-prefix rewrite."""
        x = pretty_print_xml
        # Another example from the docs:
        # Redirect requests to a specific host in the event of a 404.
        # Also, the redirect inserts a report-404/. For example,
        # if you request a page ExamplePage.html and it results
        # in a 404, the request is routed to a page report-404/ExamplePage.html
        rules = RoutingRules()
        condition = Condition(http_error_code=404)
        redirect = Redirect(hostname='example.com',
                            replace_key_prefix='report-404/')
        rules.add_rule(RoutingRule(condition, redirect))
        config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
        xml = config.to_xml()
        expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
        <IndexDocument>
          <Suffix>index.html</Suffix>
        </IndexDocument>
        <RoutingRules>
        <RoutingRule>
        <Condition>
          <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
        </Condition>
        <Redirect>
          <HostName>example.com</HostName>
          <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
        </Redirect>
        </RoutingRule>
        </RoutingRules>
        </WebsiteConfiguration>
        """
        self.assertEqual(x(expected_xml), x(xml))
    def test_key_prefix(self):
        """replace_key (not a prefix rewrite) emits <ReplaceKeyWith>."""
        x = pretty_print_xml
        rules = RoutingRules()
        condition = Condition(key_prefix="images/")
        redirect = Redirect(replace_key='folderdeleted.html')
        rules.add_rule(RoutingRule(condition, redirect))
        config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
        xml = config.to_xml()
        expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
        <IndexDocument>
          <Suffix>index.html</Suffix>
        </IndexDocument>
        <RoutingRules>
        <RoutingRule>
        <Condition>
          <KeyPrefixEquals>images/</KeyPrefixEquals>
        </Condition>
        <Redirect>
          <ReplaceKeyWith>folderdeleted.html</ReplaceKeyWith>
        </Redirect>
        </RoutingRule>
        </RoutingRules>
        </WebsiteConfiguration>
        """
        self.assertEqual(x(expected_xml), x(xml))
    def test_builders(self):
        """The fluent builder API produces the same XML as explicit construction."""
        x = pretty_print_xml
        # This is a more declarative way to create rules.
        # First the long way.
        rules = RoutingRules()
        condition = Condition(http_error_code=404)
        redirect = Redirect(hostname='example.com',
                            replace_key_prefix='report-404/')
        rules.add_rule(RoutingRule(condition, redirect))
        xml = rules.to_xml()
        # Then the more concise way.
        rules2 = RoutingRules().add_rule(
            RoutingRule.when(http_error_code=404).then_redirect(
                hostname='example.com', replace_key_prefix='report-404/'))
        xml2 = rules2.to_xml()
        self.assertEqual(x(xml), x(xml2))
    def test_parse_xml(self):
        """Round-trip: parsing website XML and re-serializing reproduces it."""
        x = pretty_print_xml
        xml_in = """<?xml version="1.0" encoding="UTF-8"?>
        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
        <IndexDocument>
          <Suffix>index.html</Suffix>
        </IndexDocument>
        <ErrorDocument>
          <Key>error.html</Key>
        </ErrorDocument>
        <RoutingRules>
        <RoutingRule>
        <Condition>
          <KeyPrefixEquals>docs/</KeyPrefixEquals>
        </Condition>
        <Redirect>
          <Protocol>https</Protocol>
          <HostName>www.example.com</HostName>
          <ReplaceKeyWith>documents/</ReplaceKeyWith>
          <HttpRedirectCode>302</HttpRedirectCode>
        </Redirect>
        </RoutingRule>
        <RoutingRule>
        <Condition>
          <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
        </Condition>
        <Redirect>
          <HostName>example.com</HostName>
          <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
        </Redirect>
        </RoutingRule>
        </RoutingRules>
        </WebsiteConfiguration>
        """
        webconfig = WebsiteConfiguration()
        h = handler.XmlHandler(webconfig, None)
        xml.sax.parseString(xml_in.encode('utf-8'), h)
        xml_out = webconfig.to_xml()
        self.assertEqual(x(xml_in), x(xml_out))
| mit |
stbka/ansible | lib/ansible/plugins/inventory/__init__.py | 162 | 2712 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from six import with_metaclass
class InventoryParser(with_metaclass(ABCMeta, object)):
    '''Abstract Base Class for retrieving inventory information

    Any InventoryParser functions by taking an inven_source. The caller then
    calls the parse() method. Once parse() is called, the caller can access
    InventoryParser.hosts for a mapping of Host objects and
    InventoryParser.groups for a mapping of Group objects.
    '''
    def __init__(self, inven_source):
        '''
        InventoryParser constructors take a source of inventory information
        that they will parse the host and group information from.
        '''
        self.inven_source = inven_source
        self.reset_parser()
    @abstractmethod
    def reset_parser(self):
        '''
        InventoryParsers generally cache their data once parse() is
        called. This method initializes any parser state before calling
        parse() again.
        '''
        self.hosts = dict()
        self.groups = dict()
        self.parsed = False
    def _merge(self, target, addition):
        '''
        This method is provided to InventoryParsers to merge host or group
        dicts since it may take several passes to get all of the data

        Example usage:
            self.hosts = self.from_ini(filename)
            new_hosts = self.from_script(scriptname)
            self._merge(self.hosts, new_hosts)
        '''
        for i in addition:
            if i in target:
                target[i].merge(addition[i])
            else:
                target[i] = addition[i]
    @abstractmethod
    def parse(self, refresh=False):
        '''
        Parse the inventory source. When *refresh* is True the cached parser
        state is reset first; otherwise a previously parsed result is
        returned as-is.
        '''
        if refresh:
            self.reset_parser()
        if self.parsed:
            return self.parsed
        # Parse self.inven_source here
        pass
| gpl-3.0 |
larsoner/mne-python | mne/decoding/tests/test_base.py | 12 | 15702 | # Author: Jean-Remi King, <jeanremi.king@gmail.com>
# Marijn van Vliet, <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_allclose, assert_array_less)
import pytest
from mne import create_info, EpochsArray
from mne.fixes import is_regressor, is_classifier
from mne.utils import requires_sklearn, requires_version
from mne.decoding.base import (_get_inverse_funcs, LinearModel, get_coef,
cross_val_multiscore, BaseEstimator)
from mne.decoding.search_light import SlidingEstimator
from mne.decoding import (Scaler, TransformerMixin, Vectorizer,
GeneralizingEstimator)
def _make_data(n_samples=1000, n_features=5, n_targets=3):
"""Generate some testing data.
Parameters
----------
n_samples : int
The number of samples.
n_features : int
The number of features.
n_targets : int
The number of targets.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The measured data.
Y : ndarray, shape (n_samples, n_targets)
The latent variables generating the data.
A : ndarray, shape (n_features, n_targets)
The forward model, mapping the latent variables (=Y) to the measured
data (=X).
"""
# Define Y latent factors
np.random.seed(0)
cov_Y = np.eye(n_targets) * 10 + np.random.rand(n_targets, n_targets)
cov_Y = (cov_Y + cov_Y.T) / 2.
mean_Y = np.random.rand(n_targets)
Y = np.random.multivariate_normal(mean_Y, cov_Y, size=n_samples)
# The Forward model
A = np.random.randn(n_features, n_targets)
X = Y.dot(A.T)
X += np.random.randn(n_samples, n_features) # add noise
X += np.random.rand(n_features) # Put an offset
return X, Y, A
@requires_sklearn
def test_get_coef():
    """Test getting linear coefficients (filters/patterns) from estimators."""
    from sklearn.base import TransformerMixin, BaseEstimator
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn import svm
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import GridSearchCV
    # LinearModel must advertise the estimator type of whatever it wraps,
    # including a wrapped GridSearchCV.
    lm_classification = LinearModel()
    assert (is_classifier(lm_classification))
    lm_regression = LinearModel(Ridge())
    assert (is_regressor(lm_regression))
    parameters = {'kernel': ['linear'], 'C': [1, 10]}
    lm_gs_classification = LinearModel(
        GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
    assert (is_classifier(lm_gs_classification))
    lm_gs_regression = LinearModel(
        GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
    assert (is_regressor(lm_gs_regression))
    # Define a classifier, an invertible transformer and an non-invertible one.
    class Clf(BaseEstimator):
        def fit(self, X, y):
            return self
    class NoInv(TransformerMixin):
        def fit(self, X, y):
            return self
        def transform(self, X):
            return X
    class Inv(NoInv):
        def inverse_transform(self, X):
            return X
    X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
    # I. Test inverse function
    # Check that we retrieve the right number of inverse functions even if
    # there are nested pipelines
    good_estimators = [
        (1, make_pipeline(Inv(), Clf())),
        (2, make_pipeline(Inv(), Inv(), Clf())),
        (3, make_pipeline(Inv(), make_pipeline(Inv(), Inv()), Clf())),
    ]
    for expected_n, est in good_estimators:
        est.fit(X, y)
        assert (expected_n == len(_get_inverse_funcs(est)))
    # Any non-invertible step anywhere in the chain must disable inversion
    # entirely (empty list of inverse funcs).
    bad_estimators = [
        Clf(),  # no preprocessing
        Inv(),  # final estimator isn't classifier
        make_pipeline(NoInv(), Clf()),  # first step isn't invertible
        make_pipeline(Inv(), make_pipeline(
            Inv(), NoInv()), Clf()),  # nested step isn't invertible
    ]
    for est in bad_estimators:
        est.fit(X, y)
        invs = _get_inverse_funcs(est)
        assert_equal(invs, list())
    # II. Test get coef for classification/regression estimators and pipelines
    rng = np.random.RandomState(0)
    for clf in (lm_regression,
                lm_gs_classification,
                make_pipeline(StandardScaler(), lm_classification),
                make_pipeline(StandardScaler(), lm_gs_regression)):
        # generate some categorical/continuous data
        # according to the type of estimator.
        if is_classifier(clf):
            n, n_features = 1000, 3
            X = rng.rand(n, n_features)
            y = np.arange(n) % 2
        else:
            X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
            y = np.ravel(y)
        clf.fit(X, y)
        # Retrieve final linear model
        filters = get_coef(clf, 'filters_', False)
        # Dig the raw sklearn coef_ out of the fitted model (possibly behind
        # a Pipeline and/or a GridSearchCV) to compare against get_coef.
        if hasattr(clf, 'steps'):
            if hasattr(clf.steps[-1][-1].model, 'best_estimator_'):
                # Linear Model with GridSearchCV
                coefs = clf.steps[-1][-1].model.best_estimator_.coef_
            else:
                # Standard Linear Model
                coefs = clf.steps[-1][-1].model.coef_
        else:
            if hasattr(clf.model, 'best_estimator_'):
                # Linear Model with GridSearchCV
                coefs = clf.model.best_estimator_.coef_
            else:
                # Standard Linear Model
                coefs = clf.model.coef_
        if coefs.ndim == 2 and coefs.shape[0] == 1:
            coefs = coefs[0]
        assert_array_equal(filters, coefs)
        patterns = get_coef(clf, 'patterns_', False)
        # Filters and patterns must differ in value but agree in shape.
        assert (filters[0] != patterns[0])
        n_chans = X.shape[1]
        assert_array_equal(filters.shape, patterns.shape, [n_chans, n_chans])
        # Inverse transform linear model
        filters_inv = get_coef(clf, 'filters_', True)
        assert (filters[0] != filters_inv[0])
        patterns_inv = get_coef(clf, 'patterns_', True)
        assert (patterns[0] != patterns_inv[0])
class _Noop(BaseEstimator, TransformerMixin):
    """Identity transformer: fitting is a no-op, transforming copies."""

    def fit(self, X, y=None):
        # Nothing to learn.
        return self

    def transform(self, X):
        # Return a copy so downstream mutation cannot touch the input.
        return X.copy()

    def inverse_transform(self, X):
        # Undoing a copy is just another copy.
        return self.transform(X)
@requires_sklearn
@pytest.mark.parametrize('inverse', (True, False))
@pytest.mark.parametrize('Scale, kwargs', [
    (Scaler, dict(info=None, scalings='mean')),
    (_Noop, dict()),
])
def test_get_coef_inverse_transform(inverse, Scale, kwargs):
    """Test get_coef with and without inverse_transform."""
    from sklearn.linear_model import Ridge
    from sklearn.pipeline import make_pipeline
    lm_regression = LinearModel(Ridge())
    X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
    # Check with search_light and combination of preprocessing ending with sl:
    # slider = SlidingEstimator(make_pipeline(StandardScaler(), lm_regression))
    # XXX : line above should work but does not as only last step is
    # used in get_coef ...
    slider = SlidingEstimator(make_pipeline(lm_regression))
    X = np.transpose([X, -X], [1, 2, 0])  # invert X across 2 time samples
    clf = make_pipeline(Scale(**kwargs), slider)
    clf.fit(X, y)
    patterns = get_coef(clf, 'patterns_', inverse)
    filters = get_coef(clf, 'filters_', inverse)
    assert_array_equal(filters.shape, patterns.shape, X.shape[1:])
    # the two time samples get inverted patterns
    assert_equal(patterns[0, 0], -patterns[0, 1])
    # Each per-time-point estimator's raw filters must match the column that
    # get_coef assembled for that time point (only exact for the no-op
    # scaler, since Scaler changes the data the sliding estimators see).
    for t in [0, 1]:
        filters_t = get_coef(
            clf.named_steps['slidingestimator'].estimators_[t],
            'filters_', False)
        if Scale is _Noop:
            assert_array_equal(filters_t, filters[:, t])
@requires_sklearn
@pytest.mark.parametrize('n_features', [1, 5])
@pytest.mark.parametrize('n_targets', [1, 3])
def test_get_coef_multiclass(n_features, n_targets):
    """Test get_coef on multiclass problems."""
    # Check patterns with more than 1 regressor
    from sklearn.linear_model import LinearRegression, Ridge
    from sklearn.pipeline import make_pipeline
    X, Y, A = _make_data(
        n_samples=30000, n_features=n_features, n_targets=n_targets)
    lm = LinearModel(LinearRegression()).fit(X, Y)
    assert_array_equal(lm.filters_.shape, lm.patterns_.shape)
    # Single-target fits are squeezed to 1D; multi-target keeps 2D.
    if n_targets == 1:
        want_shape = (n_features,)
    else:
        want_shape = (n_targets, n_features)
    assert_array_equal(lm.filters_.shape, want_shape)
    # Recovered patterns should approximate the true forward model A
    # (only well-posed when both dims exceed 1).
    if n_features > 1 and n_targets > 1:
        assert_array_almost_equal(A, lm.patterns_.T, decimal=2)
    lm = LinearModel(Ridge(alpha=0))
    clf = make_pipeline(lm)
    clf.fit(X, Y)
    if n_features > 1 and n_targets > 1:
        assert_allclose(A, lm.patterns_.T, atol=2e-2)
    coef = get_coef(clf, 'patterns_', inverse_transform=True)
    assert_allclose(lm.patterns_, coef, atol=1e-5)
    # With epochs, scaler, and vectorizer (typical use case)
    X_epo = X.reshape(X.shape + (1,))
    info = create_info(n_features, 1000., 'eeg')
    lm = LinearModel(Ridge(alpha=1))
    clf = make_pipeline(
        Scaler(info, scalings=dict(eeg=1.)),  # XXX adding this step breaks
        Vectorizer(),
        lm,
    )
    clf.fit(X_epo, Y)
    if n_features > 1 and n_targets > 1:
        assert_allclose(A, lm.patterns_.T, atol=2e-2)
    # get_coef restores the (n_features, n_times=1) epoch layout, so the raw
    # patterns need a trailing axis for comparison.
    coef = get_coef(clf, 'patterns_', inverse_transform=True)
    lm_patterns_ = lm.patterns_[..., np.newaxis]
    assert_allclose(lm_patterns_, coef, atol=1e-5)
    # Check can pass fitting parameters
    lm.fit(X, Y, sample_weight=np.ones(len(Y)))
@requires_version('sklearn', '0.22')  # roc_auc_ovr_weighted
@pytest.mark.parametrize('n_classes, n_channels, n_times', [
    (4, 10, 2),
    (4, 3, 2),
    (3, 2, 1),
    (3, 1, 2),
])
def test_get_coef_multiclass_full(n_classes, n_channels, n_times):
    """Test a full example with pattern extraction."""
    from sklearn.pipeline import make_pipeline
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import StratifiedKFold
    # Build 10 epochs per class where channel 0 carries the class label.
    data = np.zeros((10 * n_classes, n_channels, n_times))
    # Make only the first channel informative
    for ii in range(n_classes):
        data[ii * 10:(ii + 1) * 10, 0] = ii
    events = np.zeros((len(data), 3), int)
    events[:, 0] = np.arange(len(events))
    events[:, 2] = data[:, 0, 0]
    info = create_info(n_channels, 1000., 'eeg')
    epochs = EpochsArray(data, info, events, tmin=0)
    clf = make_pipeline(
        Scaler(epochs.info), Vectorizer(),
        LinearModel(LogisticRegression(random_state=0, multi_class='ovr')),
    )
    scorer = 'roc_auc_ovr_weighted'
    time_gen = GeneralizingEstimator(clf, scorer, verbose=True)
    X = epochs.get_data()
    y = epochs.events[:, 2]
    n_splits = 3
    cv = StratifiedKFold(n_splits=n_splits)
    scores = cross_val_multiscore(time_gen, X, y, cv=cv, verbose=True)
    # Generalization scores are (folds, train_times, test_times) when there
    # is more than one time sample, otherwise just (folds,).
    want = (n_splits,)
    if n_times > 1:
        want += (n_times, n_times)
    assert scores.shape == want
    assert_array_less(0.8, scores)
    clf.fit(X, y)
    patterns = get_coef(clf, 'patterns_', inverse_transform=True)
    assert patterns.shape == (n_classes, n_channels, n_times)
    assert_allclose(patterns[:, 1:], 0., atol=1e-7)  # no other channels useful
@requires_sklearn
def test_linearmodel():
    """Test LinearModel class for computing filters and patterns."""
    # check categorical target fit in standard linear model
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    clf = LinearModel()
    n, n_features = 20, 3
    X = rng.rand(n, n_features)
    y = np.arange(n) % 2
    clf.fit(X, y)
    assert_equal(clf.filters_.shape, (n_features,))
    assert_equal(clf.patterns_.shape, (n_features,))
    # 3D input must be rejected.
    with pytest.raises(ValueError):
        wrong_X = rng.rand(n, n_features, 99)
        clf.fit(wrong_X, y)
    # check categorical target fit in standard linear model with GridSearchCV
    from sklearn import svm
    from sklearn.model_selection import GridSearchCV
    parameters = {'kernel': ['linear'], 'C': [1, 10]}
    clf = LinearModel(
        GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
    clf.fit(X, y)
    assert_equal(clf.filters_.shape, (n_features,))
    assert_equal(clf.patterns_.shape, (n_features,))
    with pytest.raises(ValueError):
        wrong_X = rng.rand(n, n_features, 99)
        clf.fit(wrong_X, y)
    # check continuous target fit in standard linear model with GridSearchCV
    n_targets = 1
    Y = rng.rand(n, n_targets)
    clf = LinearModel(
        GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
    # NOTE(review): ``Y`` is built above but the fit below uses the
    # categorical ``y`` -- looks like a typo (SVR would need
    # ``np.ravel(Y)``); confirm intent before changing, as the shape
    # assertions pass either way.
    clf.fit(X, y)
    assert_equal(clf.filters_.shape, (n_features, ))
    assert_equal(clf.patterns_.shape, (n_features, ))
    with pytest.raises(ValueError):
        wrong_y = rng.rand(n, n_features, 99)
        clf.fit(X, wrong_y)
    # check multi-target fit in standard linear model
    n_targets = 5
    Y = rng.rand(n, n_targets)
    clf = LinearModel(LinearRegression())
    clf.fit(X, Y)
    # Multi-target coefficients keep a (n_targets, n_features) layout.
    assert_equal(clf.filters_.shape, (n_targets, n_features))
    assert_equal(clf.patterns_.shape, (n_targets, n_features))
    with pytest.raises(ValueError):
        wrong_y = rng.rand(n, n_features, 99)
        clf.fit(X, wrong_y)
@requires_sklearn
def test_cross_val_multiscore():
    """Test cross_val_multiscore for computing scores on decoding over time."""
    from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
    from sklearn.linear_model import LogisticRegression, LinearRegression
    logreg = LogisticRegression(solver='liblinear', random_state=0)
    # compare to cross-val-score
    X = np.random.rand(20, 3)
    y = np.arange(20) % 2
    cv = KFold(2, random_state=0, shuffle=True)
    clf = logreg
    assert_array_equal(cross_val_score(clf, X, y, cv=cv),
                       cross_val_multiscore(clf, X, y, cv=cv))
    # Test with search light
    X = np.random.rand(20, 4, 3)
    y = np.arange(20) % 2
    clf = SlidingEstimator(logreg, scoring='accuracy')
    scores_acc = cross_val_multiscore(clf, X, y, cv=cv)
    # One score per fold per time sample.
    assert_array_equal(np.shape(scores_acc), [2, 3])
    # check values
    scores_acc_manual = list()
    for train, test in cv.split(X, y):
        clf.fit(X[train], y[train])
        scores_acc_manual.append(clf.score(X[test], y[test]))
    assert_array_equal(scores_acc, scores_acc_manual)
    # check scoring metric
    # raise an error if scoring is defined at cross-val-score level and
    # search light, because search light does not return a 1-dimensional
    # prediction.
    pytest.raises(ValueError, cross_val_multiscore, clf, X, y, cv=cv,
                  scoring='roc_auc')
    clf = SlidingEstimator(logreg, scoring='roc_auc')
    scores_auc = cross_val_multiscore(clf, X, y, cv=cv, n_jobs=1)
    scores_auc_manual = list()
    for train, test in cv.split(X, y):
        clf.fit(X[train], y[train])
        scores_auc_manual.append(clf.score(X[test], y[test]))
    assert_array_equal(scores_auc, scores_auc_manual)
    # indirectly test that cross_val_multiscore rightly detects the type of
    # estimator and generates a StratifiedKFold for classifiers and a KFold
    # otherwise
    X = np.random.randn(1000, 3)
    y = np.ones(1000, dtype=int)
    y[::2] = 0
    clf = logreg
    reg = LinearRegression()
    for cross_val in (cross_val_score, cross_val_multiscore):
        manual = cross_val(clf, X, y, cv=StratifiedKFold(2))
        auto = cross_val(clf, X, y, cv=2)
        assert_array_equal(manual, auto)
        manual = cross_val(reg, X, y, cv=KFold(2))
        auto = cross_val(reg, X, y, cv=2)
        assert_array_equal(manual, auto)
| bsd-3-clause |
aleccool213/web_course_part1 | mydjangoproject/mydjangoproject/settings.py | 1 | 2882 | """
Django settings for mydjangoproject project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here; move it to an
# environment variable before any production deployment.
SECRET_KEY = '_-nhdws16g8j6^frm8$y$sz%o-s#3g_jk6@_!wj#(316nem05l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'storeapp'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    # NOTE(review): CSRF protection is disabled while the line above stays
    # commented out -- re-enable it before exposing this app publicly.
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mydjangoproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mydjangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Connection parameters come from COURSE_DB_* environment variables with
# development defaults; no 'PORT' key is given, so presumably the default
# PostgreSQL port is used.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ.get('COURSE_DB_NAME', 'course'),
        'USER': os.environ.get('COURSE_DB_USER', 'alec'),
        'PASSWORD': os.environ.get('COURSE_DB_PASSWORD', ''),
        'HOST': os.environ.get('COURSE_DB_HOST', ''),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit |
imoseyon/leanKernel-d2usc-deprecated | external/webkit/Tools/Scripts/webkitpy/tool/commands/upload.py | 15 | 22489 | #!/usr/bin/env python
# Copyright (c) 2009, 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import sys
from optparse import make_option
from webkitpy.tool import steps
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.net.bugzilla import parse_bug_id_from_changelog
from webkitpy.common.system.deprecated_logging import error, log
from webkitpy.common.system.user import User
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.comments import bug_comment_from_svn_revision
from webkitpy.tool.grammar import pluralize, join_with_separators
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
    """webkit-patch commit-message: print the commit message derived from
    the current uncommitted diff's ChangeLog entries."""
    name = "commit-message"
    help_text = "Print a commit message suitable for the uncommitted changes"

    def __init__(self):
        options = [
            steps.Options.git_commit,
        ]
        AbstractDeclarativeCommand.__init__(self, options=options)

    def execute(self, options, args, tool):
        # This command is a useful test to make sure commit_message_for_this_commit
        # always returns the right value regardless of the current working directory.
        print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message()
class CleanPendingCommit(AbstractDeclarativeCommand):
    """webkit-patch clean-pending-commit: drop stale review+ flags.

    Obsolete attachments that still carry review+ show up on
    webkit.org/pending-commit; clearing the flag keeps that list accurate.
    """
    name = "clean-pending-commit"
    help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list."

    # NOTE: This was designed to be generic, but right now we're only processing patches from the pending-commit list, so only r+ matters.
    def _flags_to_clear_on_patch(self, patch):
        """Return a human-readable description of the flags to clear on
        ``patch``, or None for patches that are not obsolete."""
        if not patch.is_obsolete():
            return None
        what_was_cleared = []
        if patch.review() == "+":
            if patch.reviewer():
                what_was_cleared.append("%s's review+" % patch.reviewer().full_name)
            else:
                what_was_cleared.append("review+")
        return join_with_separators(what_was_cleared)

    def execute(self, options, args, tool):
        """Walk every bug in the pending-commit list and obsolete stale flags."""
        # (Removed an unused ``committers = CommitterList()`` local that was
        # constructed here but never read.)
        for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
            bug = self._tool.bugs.fetch_bug(bug_id)
            patches = bug.patches(include_obsolete=True)
            for patch in patches:
                flags_to_clear = self._flags_to_clear_on_patch(patch)
                if not flags_to_clear:
                    continue
                message = "Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id())
                self._tool.bugs.obsolete_attachment(patch.id(), message)
# FIXME: This should share more logic with AssignToCommitter and CleanPendingCommit
class CleanReviewQueue(AbstractDeclarativeCommand):
    """webkit-patch clean-review-queue: drop stale review? flags so dead
    patches leave webkit.org/pending-review."""
    name = "clean-review-queue"
    help_text = "Clear r? on obsolete patches so they do not appear in the pending-commit list."

    def execute(self, options, args, tool):
        queue_url = "http://webkit.org/pending-review"
        # We do this inefficient dance to be more like webkit.org/pending-review
        # bugs.queries.fetch_bug_ids_from_review_queue() doesn't return
        # closed bugs, but folks using /pending-review will see them. :(
        for patch_id in tool.bugs.queries.fetch_attachment_ids_from_review_queue():
            patch = self._tool.bugs.fetch_attachment(patch_id)
            if not patch.review() == "?":
                continue
            attachment_obsolete_modifier = ""
            # Bug fix: initialize the explanation before branching. It used
            # to be assigned only in the elif below, so an obsolete patch
            # either raised NameError (first iteration) or silently reused
            # the explanation left over from an earlier closed-bug patch.
            bug_closed_explanation = ""
            if patch.is_obsolete():
                attachment_obsolete_modifier = "obsolete "
            elif patch.bug().is_closed():
                bug_closed_explanation = " If you would like this patch reviewed, please attach it to a new bug (or re-open this bug before marking it for review again)."
            else:
                # Neither the patch was obsolete or the bug was closed, next patch...
                continue
            message = "Cleared review? from %sattachment %s so that this bug does not appear in %s.%s" % (attachment_obsolete_modifier, patch.id(), queue_url, bug_closed_explanation)
            self._tool.bugs.obsolete_attachment(patch.id(), message)
class AssignToCommitter(AbstractDeclarativeCommand):
    """webkit-patch assign-to-committer: for each bug in the pending-commit
    list, assign it to the committer who attached the newest r+'d patch."""
    name = "assign-to-committer"
    help_text = "Assign bug to whoever attached the most recent r+'d patch"

    def _patches_have_commiters(self, reviewed_patches):
        # True only when every reviewed patch already has a committer
        # (i.e. there is nothing left for this command to do on the bug).
        for patch in reviewed_patches:
            if not patch.committer():
                return False
        return True

    def _assign_bug_to_last_patch_attacher(self, bug_id):
        """Reassign ``bug_id`` to the attacher of its newest reviewed patch,
        skipping bugs that are already assigned or need no action."""
        committers = CommitterList()
        bug = self._tool.bugs.fetch_bug(bug_id)
        if not bug.is_unassigned():
            assigned_to_email = bug.assigned_to_email()
            log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
            return
        reviewed_patches = bug.reviewed_patches()
        if not reviewed_patches:
            log("Bug %s has no non-obsolete patches, ignoring." % bug_id)
            return
        # We only need to do anything with this bug if one of the r+'d patches does not have a valid committer (cq+ set).
        if self._patches_have_commiters(reviewed_patches):
            log("All reviewed patches on bug %s already have commit-queue+, ignoring." % bug_id)
            return
        latest_patch = reviewed_patches[-1]
        attacher_email = latest_patch.attacher_email()
        committer = committers.committer_by_email(attacher_email)
        if not committer:
            log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
            return
        reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
        self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)

    def execute(self, options, args, tool):
        for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
            self._assign_bug_to_last_patch_attacher(bug_id)
class ObsoleteAttachments(AbstractSequencedCommand):
    """webkit-patch obsolete-attachments: obsolete everything on a bug."""
    name = "obsolete-attachments"
    help_text = "Mark all attachments on a bug as obsolete"
    argument_names = "BUGID"
    steps = [
        steps.ObsoletePatches,
    ]

    def _prepare_state(self, options, args, tool):
        # The single positional argument is the bug id.
        return dict(bug_id=args[0])
class AttachToBug(AbstractSequencedCommand):
    """webkit-patch attach-to-bug: upload an arbitrary file to a bug."""
    name = "attach-to-bug"
    help_text = "Attach the the file to the bug"
    argument_names = "BUGID FILEPATH"
    steps = [
        steps.AttachToBug,
    ]

    def _prepare_state(self, options, args, tool):
        # Positional arguments: BUGID then FILEPATH.
        return {
            "bug_id": args[0],
            "filepath": args[1],
        }
class AbstractPatchUploadingCommand(AbstractSequencedCommand):
    """Shared state preparation for commands that upload the current diff."""

    def _bug_id(self, options, args, tool, state):
        # Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
        bug_id = args and args[0]
        if not bug_id:
            # Side channel: the changed-files list is cached into ``state``
            # so later steps do not have to recompute it.
            changed_files = self._tool.scm().changed_files(options.git_commit)
            state["changed_files"] = changed_files
            bug_id = tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files)
        return bug_id

    def _prepare_state(self, options, args, tool):
        # Subclasses without a bug id anywhere abort here with an error.
        state = {}
        state["bug_id"] = self._bug_id(options, args, tool, state)
        if not state["bug_id"]:
            error("No bug id passed and no bug url found in ChangeLogs.")
        return state
class Post(AbstractPatchUploadingCommand):
    """webkit-patch post: upload the working-directory diff for review.

    The steps run in the listed order; reordering them changes behavior.
    """
    name = "post"
    help_text = "Attach the current working directory diff to a bug as a patch file"
    argument_names = "[BUGID]"
    steps = [
        steps.ValidateChangeLogs,
        steps.CheckStyle,
        steps.ConfirmDiff,
        steps.ObsoletePatches,
        steps.SuggestReviewers,
        steps.PostDiff,
    ]
class LandSafely(AbstractPatchUploadingCommand):
    """webkit-patch land-safely: upload the diff marked for the commit-queue."""
    name = "land-safely"
    help_text = "Land the current diff via the commit-queue"
    argument_names = "[BUGID]"
    long_help = """land-safely updates the ChangeLog with the reviewer listed
    in bugs.webkit.org for BUGID (or the bug ID detected from the ChangeLog).
    The command then uploads the current diff to the bug and marks it for
    commit by the commit-queue."""
    show_in_main_help = True
    steps = [
        steps.UpdateChangeLogsWithReviewer,
        steps.ValidateChangeLogs,
        steps.ObsoletePatches,
        steps.PostDiffForCommit,
    ]
class Prepare(AbstractSequencedCommand):
    """webkit-patch prepare: create/choose a bug and write ChangeLog stubs."""
    name = "prepare"
    help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs"
    argument_names = "[BUGID]"
    steps = [
        steps.PromptForBugOrTitle,
        steps.CreateBug,
        steps.PrepareChangeLog,
    ]

    def _prepare_state(self, options, args, tool):
        # BUGID is optional; leave a falsy value so PromptForBugOrTitle can
        # ask the user for one.
        return {"bug_id": args[0] if args else args}
class Upload(AbstractPatchUploadingCommand):
    """webkit-patch upload: the full interactive upload-for-review flow."""
    name = "upload"
    help_text = "Automates the process of uploading a patch for review"
    argument_names = "[BUGID]"
    show_in_main_help = True
    steps = [
        steps.ValidateChangeLogs,
        steps.CheckStyle,
        steps.PromptForBugOrTitle,
        steps.CreateBug,
        steps.PrepareChangeLog,
        steps.EditChangeLog,
        steps.ConfirmDiff,
        steps.ObsoletePatches,
        steps.SuggestReviewers,
        steps.PostDiff,
    ]
    long_help = """upload uploads the current diff to bugs.webkit.org.
    If no bug id is provided, upload will create a bug.
    If the current diff does not have a ChangeLog, upload
    will prepare a ChangeLog. Once a patch is read, upload
    will open the ChangeLogs for editing using the command in the
    EDITOR environment variable and will display the diff using the
    command in the PAGER environment variable."""

    def _prepare_state(self, options, args, tool):
        # Unlike the base class, a missing bug id is NOT an error here:
        # the CreateBug step can make one.
        state = {}
        state["bug_id"] = self._bug_id(options, args, tool, state)
        return state
class EditChangeLogs(AbstractSequencedCommand):
    """webkit-patch edit-changelogs: open the modified ChangeLogs in $EDITOR."""
    name = "edit-changelogs"
    help_text = "Opens modified ChangeLogs in $EDITOR"
    show_in_main_help = True
    steps = [
        steps.EditChangeLog,
    ]
class PostCommits(AbstractDeclarativeCommand):
    """webkit-patch post-commits: attach each local commit in a range to its
    bug as a patch file."""
    name = "post-commits"
    help_text = "Attach a range of local commits to bugs as patch files"
    argument_names = "COMMITISH"

    def __init__(self):
        options = [
            make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
            make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
            make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
            steps.Options.obsolete_patches,
            steps.Options.review,
            steps.Options.request_commit,
        ]
        AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True)

    def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
        # Returns None unless --add-log-as-comment was passed; the comment is
        # the commit body plus a summary of the changed files.
        comment_text = None
        if (options.add_log_as_comment):
            comment_text = commit_message.body(lstrip=True)
            comment_text += "---\n"
            comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
        return comment_text

    def execute(self, options, args, tool):
        commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
        if len(commit_ids) > 10:  # We could lower this limit, 10 is too many for one bug as-is.
            error("webkit-patch does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids))))
        # Obsolete the existing patches on each bug at most once, even when
        # several commits in the range target the same bug.
        have_obsoleted_patches = set()
        for commit_id in commit_ids:
            commit_message = tool.scm().commit_message_for_local_commit(commit_id)
            # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
            bug_id = options.bug_id or parse_bug_id_from_changelog(commit_message.message()) or parse_bug_id_from_changelog(tool.scm().create_patch(git_commit=commit_id))
            if not bug_id:
                log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
                continue
            if options.obsolete_patches and bug_id not in have_obsoleted_patches:
                state = { "bug_id": bug_id }
                steps.ObsoletePatches(tool, options).run(state)
                have_obsoleted_patches.add(bug_id)
            diff = tool.scm().create_patch(git_commit=commit_id)
            description = options.description or commit_message.description(lstrip=True, strip_url=True)
            comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
            tool.bugs.add_patch_to_bug(bug_id, diff, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
# FIXME: This command needs to be brought into the modern age with steps and CommitInfo.
class MarkBugFixed(AbstractDeclarativeCommand):
    """webkit-patch mark-bug-fixed: comment on (and optionally close) the bug
    fixed by a given SVN revision.

    Fix: the three regex literals below are now raw strings -- ``"\d"`` and
    ``"\|"`` in plain strings are invalid escape sequences (a
    DeprecationWarning on modern Python); behavior is otherwise unchanged.
    """
    name = "mark-bug-fixed"
    help_text = "Mark the specified bug as fixed"
    argument_names = "[SVN_REVISION]"

    def __init__(self):
        options = [
            make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
            make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."),
            make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."),
            make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."),
        ]
        AbstractDeclarativeCommand.__init__(self, options=options)

    # FIXME: We should be using checkout().changelog_entries_for_revision(...) instead here.
    def _fetch_commit_log(self, tool, svn_revision):
        # No revision given -> use the most recent SVN commit's log.
        if not svn_revision:
            return tool.scm().last_svn_commit_log()
        return tool.scm().svn_commit_log(svn_revision)

    def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision):
        """Fill in whichever of (bug_id, svn_revision) is missing by parsing
        the commit log; errors out if either still cannot be found."""
        commit_log = self._fetch_commit_log(tool, svn_revision)
        if not bug_id:
            bug_id = parse_bug_id_from_changelog(commit_log)
        if not svn_revision:
            match = re.search(r"^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE)
            if match:
                svn_revision = match.group('svn_revision')
        if not bug_id or not svn_revision:
            not_found = []
            if not bug_id:
                not_found.append("bug id")
            if not svn_revision:
                not_found.append("svn revision")
            error("Could not find %s on command-line or in %s."
                  % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit"))
        return (bug_id, svn_revision)

    def execute(self, options, args, tool):
        bug_id = options.bug_id
        svn_revision = args and args[0]
        if svn_revision:
            # Accept "r12345" or "12345"; strip the leading 'r' if present.
            if re.match(r"^r[0-9]+$", svn_revision, re.IGNORECASE):
                svn_revision = svn_revision[1:]
            if not re.match(r"^[0-9]+$", svn_revision):
                error("Invalid svn revision: '%s'" % svn_revision)
        # Only confirm with the user when we had to infer something.
        needs_prompt = False
        if not bug_id or not svn_revision:
            needs_prompt = True
            (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision)
        log("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"]))
        log("Revision: %s" % svn_revision)
        if options.open_bug:
            tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id))
        if needs_prompt:
            if not tool.user.confirm("Is this correct?"):
                exit(1)
        bug_comment = bug_comment_from_svn_revision(svn_revision)
        if options.comment:
            bug_comment = "%s\n\n%s" % (options.comment, bug_comment)
        if options.update_only:
            log("Adding comment to Bug %s." % bug_id)
            tool.bugs.post_comment_to_bug(bug_id, bug_comment)
        else:
            log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id)
            tool.bugs.close_bug_as_fixed(bug_id, bug_comment)
# FIXME: Requires unit test.  Blocking issue: too complex for now.
class CreateBug(AbstractDeclarativeCommand):
    """Command that files a new Bugzilla bug from either local commits
    (when the SCM supports them) or the working-directory diff."""
    name = "create-bug"
    help_text = "Create a bug from local changes or local commits"
    argument_names = "[COMMITISH]"

    def __init__(self):
        options = [
            steps.Options.cc,
            steps.Options.component,
            make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."),
            make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
            make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
        ]
        AbstractDeclarativeCommand.__init__(self, options=options)

    def create_bug_from_commit(self, options, args, tool):
        """Create a bug from the first local commit; attach any further
        commits as additional patches via PostCommits."""
        commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
        # Guard against accidentally filing one bug with a huge patch series.
        if len(commit_ids) > 3:
            error("Are you sure you want to create one bug with %s patches?" % len(commit_ids))
        commit_id = commit_ids[0]
        bug_title = ""
        comment_text = ""
        if options.prompt:
            (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
        else:
            # Derive title/comment from the commit message instead of prompting.
            commit_message = tool.scm().commit_message_for_local_commit(commit_id)
            bug_title = commit_message.description(lstrip=True, strip_url=True)
            comment_text = commit_message.body(lstrip=True)
            comment_text += "---\n"
            comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
        diff = tool.scm().create_patch(git_commit=commit_id)
        bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
        if bug_id and len(commit_ids) > 1:
            # Attach the remaining commits to the freshly created bug.
            options.bug_id = bug_id
            options.obsolete_patches = False
            # FIXME: We should pass through --no-comment switch as well.
            PostCommits.execute(self, options, commit_ids[1:], tool)

    def create_bug_from_patch(self, options, args, tool):
        """Create a bug from the working-directory diff."""
        bug_title = ""
        comment_text = ""
        if options.prompt:
            (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
        else:
            commit_message = tool.checkout().commit_message_for_this_commit(options.git_commit)
            bug_title = commit_message.description(lstrip=True, strip_url=True)
            comment_text = commit_message.body(lstrip=True)
        diff = tool.scm().create_patch(options.git_commit)
        # NOTE(review): bug_id is assigned but never used here, unlike in
        # create_bug_from_commit where it drives follow-up attachments.
        bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)

    def prompt_for_bug_title_and_comment(self):
        """Interactively read a bug title and a multi-line comment from stdin."""
        bug_title = User.prompt("Bug title: ")
        print "Bug comment (hit ^D on blank line to end):"
        lines = sys.stdin.readlines()
        try:
            # Reset stdin so later prompts still work after readlines() hit EOF.
            sys.stdin.seek(0, os.SEEK_END)
        except IOError:
            # Cygwin raises an Illegal Seek (errno 29) exception when the above
            # seek() call is made.  Ignoring it seems to cause no harm.
            # FIXME: Figure out a way to avoid the exception in the first
            # place.
            pass
        comment_text = "".join(lines)
        return (bug_title, comment_text)

    def execute(self, options, args, tool):
        # With extra args we file from local commits (git); otherwise from the
        # working-directory patch (svn or a clean git tree).
        if len(args):
            if (not tool.scm().supports_local_commits()):
                error("Extra arguments not supported; patch is taken from working directory.")
            self.create_bug_from_commit(options, args, tool)
        else:
            self.create_bug_from_patch(options, args, tool)
| gpl-2.0 |
zentner-kyle/servo | tests/wpt/web-platform-tests/webdriver/navigation/auth_tests.py | 58 | 1384 | import os
import sys
import unittest
import ConfigParser
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from webdriver import exceptions
from wptserve import server
from wptserve.router import any_method
from wptserve.handlers import basic_auth_handler
class WebDriverAuthTest(unittest.TestCase):
    """Checks that WebDriver still answers when the browser hits a page
    protected by HTTP Basic auth (a 401 challenge)."""

    # Set up class to start HTTP Server that responds to
    # test URLs with various 401 responses
    @classmethod
    def setUpClass(cls):
        cls.driver = base_test.create_driver()
        # Every route on this server replies with a Basic-auth challenge.
        cls.webserver = server.WebTestHttpd(routes=[(any_method, "*", basic_auth_handler)])
        cls.webserver.start()

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        cls.webserver.stop()

    # Test that when 401 is seen by browser, a WebDriver response is still sent
    def test_response_401_auth_basic(self):
        page = self.webserver.get_url('navigation/res/authenticated.html')
        self.driver.set_page_load_timeout(5)
        try:
            self.driver.get(page)
            # If we got a response instead of a timeout, that's success.
        except exceptions.TimeoutException:
            self.fail("Did not get response from browser.")
        except Exception as e:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt and hid the actual error from the report.
            self.fail("Unexpected failure. Please investigate. (%s)" % e)
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| mpl-2.0 |
mic4ael/indico | indico/migrations/versions/20171124_1138_2af245be72a6_review_questions_models.py | 3 | 2844 | """Add columns to review questions and review ratings tables.
Revision ID: 2af245be72a6
Revises: 566d5de4e0e5
Create Date: 2017-11-24 11:38:33.292283
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '2af245be72a6'
down_revision = '566d5de4e0e5'
branch_labels = None
depends_on = None

# (schema, ratings table, questions table) triples this migration touches;
# the abstract-review and paper-review tables share the same shape.
tables = (('event_abstracts', 'abstract_review_ratings', 'abstract_review_questions'),
          ('event_paper_reviewing', 'review_ratings', 'review_questions'))
def upgrade():
    """Rename question `text` to `title`, add the new question columns and
    convert rating values to JSON, for both review schemas."""
    # Columns to add: (name, type, temporary server default).  Each is created
    # NOT NULL with a default so existing rows are filled, then the default is
    # dropped so future inserts must supply a value explicitly.
    added_columns = (
        ('field_type', sa.String(), 'rating'),
        ('is_required', sa.Boolean(), 'true'),
        ('field_data', sa.JSON(), '{}'),
        ('description', sa.Text(), ''),
    )
    for schema, ratings_table, questions_table in tables:
        op.alter_column(questions_table, 'text', new_column_name='title', schema=schema)
        for col_name, col_type, tmp_default in added_columns:
            op.add_column(questions_table,
                          sa.Column(col_name, col_type, nullable=False, server_default=tmp_default),
                          schema=schema)
            op.alter_column(questions_table, col_name, server_default=None, schema=schema)
        # Ratings used to be plain integers; store them as JSON from now on.
        op.execute('ALTER TABLE {0}.{1} ALTER COLUMN "value" TYPE JSON USING to_json(value)'.format(schema,
                                                                                                    ratings_table))
def downgrade():
    """Revert the column additions and restore integer rating values."""
    for schema, ratings_table, questions_table in tables:
        op.alter_column(questions_table, 'title', new_column_name='text', schema=schema)
        # Ratings answering questions that the old schema cannot represent
        # (non-rating fields or optional questions) must go, then the
        # questions themselves.
        op.execute("DELETE FROM {0}.{1} WHERE question_id IN(SELECT id FROM {0}.{2} "
                   "WHERE field_type != 'rating' OR NOT is_required)".format(schema, ratings_table, questions_table))
        op.execute("DELETE FROM {0}.{1} WHERE field_type != 'rating'".format(schema, questions_table))
        # Turn the JSON rating values back into plain integers.
        op.execute('ALTER TABLE {0}.{1} ALTER COLUMN "value" TYPE INT USING value::TEXT::INT'.format(schema,
                                                                                                     ratings_table))
        for removed_column in ('field_type', 'is_required', 'field_data', 'description'):
            op.drop_column(questions_table, removed_column, schema=schema)
| mit |
sriprasanna/django-1.3.1 | tests/modeltests/fixtures/models.py | 51 | 3102 | """
37. Fixtures.
Fixtures are a way of loading data into the database in bulk. Fixure data
can be stored in any serializable format (including JSON and XML). Fixtures
are identified by name, and are stored in either a directory named 'fixtures'
in the application directory, or in one of the directories named in the
``FIXTURE_DIRS`` setting.
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models, DEFAULT_DB_ALIAS
from django.conf import settings
class Category(models.Model):
    title = models.CharField(max_length=100)
    description = models.TextField()

    def __unicode__(self):
        return self.title

    class Meta:
        # Default ordering by title so fixture output is deterministic.
        ordering = ('title',)
class Article(models.Model):
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()

    def __unicode__(self):
        return self.headline

    class Meta:
        # Newest first; headline breaks ties.
        ordering = ('-pub_date', 'headline')
class Blog(models.Model):
    name = models.CharField(max_length=100)
    # Two relations to Article need distinct related_names to avoid clashes.
    featured = models.ForeignKey(Article, related_name='fixtures_featured_set')
    articles = models.ManyToManyField(Article, blank=True,
                                      related_name='fixtures_articles_set')

    def __unicode__(self):
        return self.name
class Tag(models.Model):
    name = models.CharField(max_length=100)
    # Generic foreign key: a Tag may point at an instance of any model.
    tagged_type = models.ForeignKey(ContentType, related_name="fixtures_tag_set")
    tagged_id = models.PositiveIntegerField(default=0)
    tagged = generic.GenericForeignKey(ct_field='tagged_type',
                                       fk_field='tagged_id')

    def __unicode__(self):
        return '<%s: %s> tagged "%s"' % (self.tagged.__class__.__name__,
                                         self.tagged, self.name)
class PersonManager(models.Manager):
    def get_by_natural_key(self, name):
        # Used by the serializers to resolve natural-key fixture references.
        return self.get(name=name)
class Person(models.Model):
    # Custom manager providing natural-key lookup for fixtures.
    objects = PersonManager()
    name = models.CharField(max_length=100)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ('name',)

    def natural_key(self):
        # Serialize references to Person by name instead of primary key.
        return (self.name,)
class SpyManager(PersonManager):
    def get_query_set(self):
        # Default queryset hides spies whose cover has been blown.
        return super(SpyManager, self).get_query_set().filter(cover_blown=False)
class Spy(Person):
    # Multi-table inheritance from Person with a filtered default manager.
    objects = SpyManager()
    cover_blown = models.BooleanField(default=False)
class Visa(models.Model):
    person = models.ForeignKey(Person)
    permissions = models.ManyToManyField(Permission, blank=True)

    def __unicode__(self):
        return '%s %s' % (self.person.name,
                          ', '.join(p.name for p in self.permissions.all()))
class Book(models.Model):
    name = models.CharField(max_length=100)
    authors = models.ManyToManyField(Person)

    def __unicode__(self):
        return '%s by %s' % (self.name,
                             ' and '.join(a.name for a in self.authors.all()))

    class Meta:
        ordering = ('name',)
| bsd-3-clause |
kjw0106/boto | boto/pyami/config.py | 95 | 8016 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import re
import warnings
import boto
from boto.compat import expanduser, ConfigParser, StringIO
# By default we use two locations for the boto configurations,
# /etc/boto.cfg and ~/.boto (which works on Windows and Unix).
BotoConfigPath = '/etc/boto.cfg'
BotoConfigLocations = [BotoConfigPath]
UserConfigPath = os.path.join(expanduser('~'), '.boto')
BotoConfigLocations.append(UserConfigPath)

# If there's a BOTO_CONFIG variable set, we load ONLY
# that variable (it replaces the default search path entirely).
if 'BOTO_CONFIG' in os.environ:
    BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]

# If there's a BOTO_PATH variable set, we use anything there
# as the current configuration locations, split with os.pathsep.
elif 'BOTO_PATH' in os.environ:
    BotoConfigLocations = []
    for path in os.environ['BOTO_PATH'].split(os.pathsep):
        BotoConfigLocations.append(expanduser(path))
class Config(ConfigParser):
    """Boto's configuration object.

    Extends ConfigParser with typed accessors that return a default instead
    of raising, credential-file loading, persistence helpers, and SimpleDB
    dump/load support.  Reads BotoConfigLocations (or an explicit path/fp)
    on construction unless do_load is False.
    """

    def __init__(self, path=None, fp=None, do_load=True):
        # We don't use ``super`` here, because ``ConfigParser`` still uses
        # old-style classes.
        ConfigParser.__init__(self, {'working_dir': '/mnt/pyami',
                                     'debug': '0'})
        if do_load:
            if path:
                self.load_from_path(path)
            elif fp:
                self.readfp(fp)
            else:
                self.read(BotoConfigLocations)
            if "AWS_CREDENTIAL_FILE" in os.environ:
                full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
                try:
                    self.load_credential_file(full_path)
                except IOError:
                    warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)

    def load_credential_file(self, path):
        """Load a credential file as is setup like the Java utilities"""
        c_data = StringIO()
        c_data.write("[Credentials]\n")
        # Rewrite the Java-style keys to boto's option names.  Use a context
        # manager so the handle is closed deterministically (the original
        # leaked it until garbage collection).
        with open(path, "r") as cred_fp:
            for line in cred_fp:
                c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
        c_data.seek(0)
        self.readfp(c_data)

    def load_from_path(self, path):
        """Read the config file at *path*, first recursively loading any
        files referenced by ``#import <filename>`` lines inside it."""
        # Renamed from ``file`` (shadowed the builtin) and wrapped in ``with``
        # so the handle is closed even if a nested load raises.
        with open(path) as cfg_fp:
            for line in cfg_fp:
                match = re.match(r"^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
                if match:
                    # Imported paths are resolved relative to this file.
                    extended_file = match.group(1)
                    directory = os.path.dirname(path)
                    self.load_from_path(os.path.join(directory, extended_file))
        self.read(path)

    def save_option(self, path, section, option, value):
        """
        Write the specified Section.Option to the config file specified by path.
        Replace any previous value.  If the path doesn't exist, create it.
        Also add the option to the in-memory config.
        """
        config = ConfigParser()
        config.read(path)
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, option, value)
        with open(path, 'w') as fp:
            config.write(fp)
        if not self.has_section(section):
            self.add_section(section)
        self.set(section, option, value)

    def save_user_option(self, section, option, value):
        """Persist an option to the per-user config file (~/.boto)."""
        self.save_option(UserConfigPath, section, option, value)

    def save_system_option(self, section, option, value):
        """Persist an option to the system-wide config file (/etc/boto.cfg)."""
        self.save_option(BotoConfigPath, section, option, value)

    def get_instance(self, name, default=None):
        """Get an option from the [Instance] section, or *default*."""
        try:
            val = self.get('Instance', name)
        except Exception:
            # Narrowed from a bare ``except:`` which also caught SystemExit.
            val = default
        return val

    def get_user(self, name, default=None):
        """Get an option from the [User] section, or *default*."""
        try:
            val = self.get('User', name)
        except Exception:
            val = default
        return val

    def getint_user(self, name, default=0):
        """Get an integer option from the [User] section, or *default*."""
        try:
            val = self.getint('User', name)
        except Exception:
            val = default
        return val

    def get_value(self, section, name, default=None):
        return self.get(section, name, default)

    def get(self, section, name, default=None):
        """Like ConfigParser.get, but returns *default* instead of raising."""
        try:
            val = ConfigParser.get(self, section, name)
        except Exception:
            val = default
        return val

    def getint(self, section, name, default=0):
        """Like ConfigParser.getint, but returns int(*default*) on any error."""
        try:
            val = ConfigParser.getint(self, section, name)
        except Exception:
            val = int(default)
        return val

    def getfloat(self, section, name, default=0.0):
        """Like ConfigParser.getfloat, but returns float(*default*) on any error."""
        try:
            val = ConfigParser.getfloat(self, section, name)
        except Exception:
            val = float(default)
        return val

    def getbool(self, section, name, default=False):
        """Read a boolean option: only the literal 'true' (any case) is True."""
        if self.has_option(section, name):
            val = self.get(section, name)
            if val.lower() == 'true':
                val = True
            else:
                val = False
        else:
            val = default
        return val

    def setbool(self, section, name, value):
        """Store a boolean option as the strings 'true'/'false'."""
        if value:
            self.set(section, name, 'true')
        else:
            self.set(section, name, 'false')

    def dump(self):
        """Print the full configuration to stdout (including secrets)."""
        s = StringIO()
        self.write(s)
        print(s.getvalue())

    def dump_safe(self, fp=None):
        """Write the configuration to *fp* with the secret key masked.

        NOTE(review): when fp is omitted, output goes to a throwaway StringIO
        and is neither printed nor returned — callers should pass fp.
        """
        if not fp:
            fp = StringIO()
        for section in self.sections():
            fp.write('[%s]\n' % section)
            for option in self.options(section):
                if option == 'aws_secret_access_key':
                    fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
                else:
                    fp.write('%s = %s\n' % (option, self.get(section, option)))

    def dump_to_sdb(self, domain_name, item_name):
        """Store the configuration as JSON blobs in a SimpleDB item."""
        from boto.compat import json
        sdb = boto.connect_sdb()
        domain = sdb.lookup(domain_name)
        if not domain:
            domain = sdb.create_domain(domain_name)
        item = domain.new_item(item_name)
        item.active = False
        for section in self.sections():
            d = {}
            for option in self.options(section):
                d[option] = self.get(section, option)
            item[section] = json.dumps(d)
        item.save()

    def load_from_sdb(self, domain_name, item_name):
        """Load a configuration previously saved with dump_to_sdb."""
        from boto.compat import json
        sdb = boto.connect_sdb()
        domain = sdb.lookup(domain_name)
        item = domain.get_item(item_name)
        for section in item.keys():
            if not self.has_section(section):
                self.add_section(section)
            d = json.loads(item[section])
            for attr_name in d.keys():
                attr_value = d[attr_name]
                if attr_value is None:
                    attr_value = 'None'
                if isinstance(attr_value, bool):
                    self.setbool(section, attr_name, attr_value)
                else:
                    self.set(section, attr_name, attr_value)
| mit |
psav/cfme_tests | cfme/containers/provider/__init__.py | 1 | 25790 | import attr
import random
from random import sample
from traceback import format_exc
import re
from navmazing import NavigateToSibling, NavigateToAttribute
from widgetastic_manageiq import StatusBox, ContainerSummaryTable
from widgetastic.utils import VersionPick, Version
from widgetastic.widget import Text, View, TextInput
from widgetastic_patternfly import (
SelectorDropdown, Dropdown, BootstrapSelect, Input, Button, Tab
)
from wrapanapi.utils import eval_strings
from cfme import exceptions
from cfme.base.credential import TokenCredential
from cfme.base.login import BaseLoggedInPage
from cfme.common import TagPageView, PolicyProfileAssignable
from cfme.common.candu_views import OptionForm
from cfme.common.provider import BaseProvider, DefaultEndpoint, DefaultEndpointForm, provider_types
from cfme.common.provider_views import (
BeforeFillMixin, ContainerProviderAddView, ContainerProvidersView,
ContainerProviderEditView, ContainerProviderEditViewUpdated, ProvidersView,
ContainerProviderAddViewUpdated, ProviderSideBar,
ProviderDetailsToolBar, ProviderDetailsView, ProviderToolBar)
from cfme.modeling.base import BaseCollection
from cfme.utils import version
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.browser import browser
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.varmeth import variable
from cfme.utils.wait import wait_for
from widgetastic_manageiq import (
SummaryTable, BreadCrumb, Accordion, ManageIQTree, LineChart
)
class ContainersProviderDefaultEndpoint(DefaultEndpoint):
    """Represents Containers Provider default endpoint"""
    credential_class = TokenCredential

    @property
    def view_value_mapping(self):
        # Values used to fill the "Default" endpoint tab of the Add/Edit form.
        out = {'hostname': self.hostname,
               'password': self.token,
               'api_port': self.api_port,
               'sec_protocol': self.sec_protocol}
        # For a custom-CA SSL endpoint, fetch the CA certificate from the
        # provider's master node over SSH (when the subclass supplies a helper).
        if self.sec_protocol.lower() == 'ssl trusting custom ca' and hasattr(self, 'get_ca_cert'):
            out['trusted_ca_certificates'] = self.get_ca_cert(
                {"username": self.ssh_creds.principal,
                 "password": self.ssh_creds.secret,
                 "hostname": self.master_hostname})
        # The "confirm password" field was removed from the form in 5.9.
        out['confirm_password'] = version.pick({
            version.LOWEST: self.token,
            '5.9': None})
        return out
class ContainersProviderEndpointsForm(View):
    """
    represents default Containers Provider endpoint form in UI (Add/Edit dialogs)
    """
    @View.nested
    class default(Tab, DefaultEndpointForm, BeforeFillMixin):  # NOQA
        TAB_NAME = 'Default'
        sec_protocol = BootstrapSelect('default_security_protocol')
        trusted_ca_certificates = TextInput('default_tls_ca_certs')
        api_port = Input('default_api_port')

    @View.nested
    class virtualization(Tab, BeforeFillMixin):  # NOQA
        TAB_NAME = 'Virtualization'
        kubevirt_token = Input('kubevirt_password')
        validate = Button('Validate')

    @View.nested
    class metrics(Tab, BeforeFillMixin):  # NOQA
        # The tab (and its widget ids) were renamed from "Hawkular" to
        # "Metrics" in 5.9, hence the VersionPick on every widget.
        TAB_NAME = VersionPick({
            Version.lowest(): 'Hawkular',
            '5.9': 'Metrics'
        })
        sec_protocol = VersionPick({
            Version.lowest(): BootstrapSelect(id='hawkular_security_protocol'),
            '5.9': BootstrapSelect(id='metrics_security_protocol')
        })
        trusted_ca_certificates = VersionPick({
            Version.lowest(): TextInput('hawkular_tls_ca_certs'),
            '5.9': TextInput('metrics_tls_ca_certs')
        })
        hostname = VersionPick({
            Version.lowest(): Input('hawkular_hostname'),
            '5.9': Input('metrics_hostname')
        })
        api_port = VersionPick({
            Version.lowest(): Input('hawkular_api_port'),
            '5.9': Input('metrics_api_port')
        })
        validate = Button('Validate')

    @View.nested
    class alerts(Tab, BeforeFillMixin):  # NOQA
        TAB_NAME = 'Alerts'
        sec_protocol = BootstrapSelect(id='prometheus_alerts_security_protocol')
        trusted_ca_certificates = TextInput('prometheus_alerts_tls_ca_certs')
        hostname = Input('prometheus_alerts_hostname')
        api_port = Input('prometheus_alerts_api_port')
        validate = Button('Validate')
class LoggingableView(View):
    """Mixin for views that expose the Monitoring -> External Logging link."""
    monitor = Dropdown('Monitoring')

    def get_logging_url(self):
        """Open 'External Logging', wait for Kibana in the new browser window,
        then close that window and return its URL.

        Raises RuntimeError when no new window opens or Kibana never loads.
        """
        def report_kibana_failure():
            raise RuntimeError("Kibana not found in the window title or content")

        browser_instance = browser()

        # Snapshot the window handles so we can identify the window that the
        # "External Logging" menu item opens.
        all_windows_before = browser_instance.window_handles
        appliance_window = browser_instance.current_window_handle

        self.monitor.item_select('External Logging')

        all_windows_after = browser_instance.window_handles

        new_windows = set(all_windows_after) - set(all_windows_before)

        if not new_windows:
            raise RuntimeError("No logging window was open!")

        logging_window = new_windows.pop()
        browser_instance.switch_to_window(logging_window)

        logging_url = browser_instance.current_url

        # Poll until "kibana" appears in the window title or page source.
        wait_for(lambda: "kibana" in
                 browser_instance.title.lower() + " " +
                 browser_instance.page_source.lower(),
                 fail_func=report_kibana_failure, num_sec=60, delay=5)

        # Close the logging window and return focus to the appliance window.
        browser_instance.close()
        browser_instance.switch_to_window(appliance_window)

        return logging_url
class ContainerProviderDetailsView(ProviderDetailsView, LoggingableView):
    """
    Container Details page
    """
    @property
    def is_displayed(self):
        # Displayed when the generic provider-details check passes and the
        # navigation tree points at Compute -> Containers -> Providers.
        return (super(ContainerProviderDetailsView, self).is_displayed and
                self.navigation.currently_selected == ['Compute', 'Containers', 'Providers'])
@attr.s(hash=False)
class ContainersProvider(BaseProvider, Pretty, PolicyProfileAssignable):
    """A containers (OpenShift/Kubernetes-style) provider.

    Exposes entity counters both from the appliance DB (alias 'db', the
    default) and from the UI summary page (variant 'ui') via @variable,
    so tests can cross-check the two sources.
    """
    PLURAL = 'Providers'
    provider_types = {}
    in_version = ('5.5', version.LATEST)
    category = "container"
    pretty_attrs = [
        'name',
        'key',
        'zone',
        'metrics_type',
        'alerts_type']
    # Counters compared between provider API / DB / UI by provider tests.
    STATS_TO_MATCH = [
        'num_project',
        'num_service',
        'num_replication_controller',
        'num_pod',
        'num_node',
        'num_image_registry',
        'num_container']
    # TODO add 'num_volume'
    string_name = "Containers"
    detail_page_suffix = 'provider_detail'
    edit_page_suffix = 'provider_edit_detail'
    quad_name = None
    db_types = ["ContainerManager"]
    endpoints_form = ContainersProviderEndpointsForm
    all_view = ContainerProvidersView
    details_view = ContainerProviderDetailsView
    refresh_text = 'Refresh items and relationships'

    name = attr.ib(default=None)
    key = attr.ib(default=None)
    zone = attr.ib(default=None)
    metrics_type = attr.ib(default=None)
    alerts_type = attr.ib(default=None)
    provider_data = attr.ib(default=None)

    def __attrs_post_init__(self):
        super(ContainersProvider, self).__attrs_post_init__()
        self.parent = self.appliance.collections.containers_providers

    @property
    def view_value_mapping(self):
        # Values used to fill the top part of the provider Add/Edit form.
        mapping = {
            'name': self.name,
            'prov_type': self.type,
            'zone': self.zone
        }
        return mapping

    @variable(alias='db')
    def num_project(self):
        return self._num_db_generic('container_projects')

    @num_project.variant('ui')
    def num_project_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Projects"))

    @variable(alias='db')
    def num_service(self):
        return self._num_db_generic('container_services')

    @num_service.variant('ui')
    def num_service_ui(self):
        # The summary row was renamed in 5.7.
        if self.appliance.version < "5.7":
            name = "Services"
        else:
            name = "Container Services"
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of(name))

    @variable(alias='db')
    def num_replication_controller(self):
        return self._num_db_generic('container_replicators')

    @num_replication_controller.variant('ui')
    def num_replication_controller_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Replicators"))

    @variable(alias='db')
    def num_container_group(self):
        return self._num_db_generic('container_groups')

    @num_container_group.variant('ui')
    def num_container_group_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Pods"))

    @variable(alias='db')
    def num_pod(self):
        # potato tomato: "pod" and "container group" are the same thing.
        return self.num_container_group()

    @num_pod.variant('ui')
    def num_pod_ui(self):
        # potato tomato
        return self.num_container_group(method='ui')

    @variable(alias='db')
    def num_node(self):
        return self._num_db_generic('container_nodes')

    @num_node.variant('ui')
    def num_node_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Nodes"))

    @variable(alias='db')
    def num_container(self):
        # Containers are linked to providers through container definitions and
        # then through pods; 5.9 dropped the container_definitions table.
        query = version.pick({
            version.LOWEST: "SELECT count(*) "
            "FROM ext_management_systems, container_groups, container_definitions, containers "
            "WHERE containers.container_definition_id=container_definitions.id "
            "AND container_definitions.container_group_id=container_groups.id "
            "AND container_groups.ems_id=ext_management_systems.id "
            "AND ext_management_systems.name='{}'".format(self.name),
            '5.9': "SELECT count(*) "
            "FROM ext_management_systems, container_groups, containers "
            "WHERE containers.container_group_id=container_groups.id "
            "AND container_groups.ems_id=ext_management_systems.id "
            "AND ext_management_systems.name='{}'".format(self.name)
        })
        res = self.appliance.db.client.engine.execute(query)
        return int(res.first()[0])

    @num_container.variant('ui')
    def num_container_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Containers"))

    @variable(alias='db')
    def num_image(self):
        return self._num_db_generic('container_images')

    @num_image.variant('ui')
    def num_image_ui(self):
        # The summary row was renamed in 5.7.
        if self.appliance.version < "5.7":
            name = "Images"
        else:
            name = "Container Images"
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of(name))

    @variable(alias='db')
    def num_image_registry(self):
        return self._num_db_generic('container_image_registries')

    @num_image_registry.variant('ui')
    def num_image_registry_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Image Registries"))

    def pods_per_ready_status(self):
        """Grab the per-pod condition statuses from the provider API.

        Returns a dict mapping pod name -> {condition type: bool-ish status}.
        """
        # TODO: Add later this logic to wrapanapi
        entities = self.mgmt.api.get('pod')[1]['items']
        out = {}
        for entity_j in entities:
            out[entity_j['metadata']['name']] = {
                condition['type']: eval_strings([condition['status']]).pop()
                for condition in entity_j['status'].get('conditions', [])
            }
        return out
@attr.s
class ContainersProviderCollection(BaseCollection):
    """Collection object for ContainersProvider objects
    """
    ENTITY = ContainersProvider

    def all(self):
        """Return provider objects for every provider listed on the All page."""
        view = navigate_to(self, 'All')
        provs = view.entities.get_all(surf_pages=True)

        # trying to figure out provider type and class
        # todo: move to all providers collection later
        def _get_class(pid):
            # Match the REST "type" string against each known class's db_types.
            prov_type = self.appliance.rest_api.collections.providers.get(id=pid)['type']
            for prov_class in provider_types('infra').values():
                if prov_class.db_types[0] in prov_type:
                    return prov_class

        return [self.instantiate(prov_class=_get_class(p.data['id']), name=p.name) for p in provs]

    def instantiate(self, prov_class, *args, **kwargs):
        return prov_class.from_collection(self, *args, **kwargs)

    def create(self, prov_class, *args, **kwargs):
        # ugly workaround until I move everything to main class:
        # split kwargs into attrs-constructor args and create() args.
        class_attrs = [at.name for at in attr.fields(prov_class)]
        init_kwargs = {}
        create_kwargs = {}
        for name, value in kwargs.items():
            if name not in class_attrs:
                create_kwargs[name] = value
            else:
                init_kwargs[name] = value
        obj = self.instantiate(prov_class, *args, **init_kwargs)
        obj.create(**create_kwargs)
        return obj
@navigator.register(ContainersProviderCollection, 'All')
@navigator.register(ContainersProvider, 'All')
class All(CFMENavigateStep):
    """Navigate to Compute -> Containers -> Providers."""
    VIEW = ContainerProvidersView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    def step(self):
        self.prerequisite_view.navigation.select('Compute', 'Containers', 'Providers')

    def resetter(self):
        # Reset view and selection
        self.view.toolbar.view_selector.select("Grid View")
        self.view.paginator.reset_selection()
@navigator.register(ContainersProviderCollection, 'Add')
@navigator.register(ContainersProvider, 'Add')
class Add(CFMENavigateStep):
    """Open the Add Containers Provider form (wording changed in 5.9)."""
    def container_provider_view_class(self):
        return VersionPick({
            Version.lowest(): ContainerProviderAddView,
            '5.9': ContainerProviderAddViewUpdated
        })

    @property
    def VIEW(self):  # noqa
        # Resolved lazily because it depends on the appliance version.
        return self.container_provider_view_class().pick(self.obj.appliance.version)

    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select(
            VersionPick({
                Version.lowest(): 'Add Existing Containers Provider',
                '5.9': 'Add a new Containers Provider'
            }).pick(self.obj.appliance.version))
@navigator.register(ContainersProvider, 'Details')
class Details(CFMENavigateStep):
    """Open the provider's details page by clicking its quadicon."""
    VIEW = ContainerProviderDetailsView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.entities.get_entity(name=self.obj.name,
                                                   surf_pages=True).click()

    def resetter(self):
        self.view.toolbar.view_selector.select("Summary View")
@navigator.register(ContainersProvider, 'Edit')
class Edit(CFMENavigateStep):
    """Open the Edit form by checking the provider on the All page."""
    def container_provider_edit_view_class(self):
        return VersionPick({
            Version.lowest(): ContainerProviderEditView,
            '5.9': ContainerProviderEditViewUpdated
        })

    @property
    def VIEW(self):  # noqa
        # Resolved lazily because it depends on the appliance version.
        return self.container_provider_edit_view_class().pick(self.obj.appliance.version)

    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.entities.get_entity(name=self.obj.name,
                                                   surf_pages=True).check()
        self.prerequisite_view.toolbar.configuration.item_select(
            'Edit Selected Containers Provider')
@navigator.register(ContainersProvider, 'EditFromDetails')
class EditFromDetails(CFMENavigateStep):
    """Open the Edit form from the provider's details page."""
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Edit this Containers Provider')
@navigator.register(ContainersProvider, 'EditTags')
class EditTags(CFMENavigateStep):
    """Open the Edit Tags page via the provider's details page."""
    VIEW = TagPageView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.entities.get_entity(name=self.obj.name,
                                                   surf_pages=True).click()
        self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
@navigator.register(ContainersProvider, 'EditTagsFromDetails')
class EditTagsFromDetails(CFMENavigateStep):
    """Open the Edit Tags page from the provider's details page."""
    VIEW = TagPageView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
@navigator.register(ContainersProvider, 'TimelinesFromDetails')
class TimelinesFromDetails(CFMENavigateStep):
    """Open Monitoring -> Timelines from the provider's details page."""
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.monitoring.item_select('Timelines')
@navigator.register(ContainersProvider, 'TopologyFromDetails')
class TopologyFromDetails(CFMENavigateStep):
    """Switch the details page to the Topology view."""
    prerequisite = NavigateToSibling('Details')

    def step(self):
        # TODO: implement topology view
        self.prerequisite_view.toolbar.view_selector.select("Topology View")
class AdHocMetricsView(BaseLoggedInPage):
    """The Monitoring -> Ad hoc Metrics page of a containers provider."""
    filter_dropdown = SelectorDropdown('uib-tooltip', 'Filter by')
    filter_result_header = Text('h5.ng-binding')
    apply_btn = Button("Apply Filters")
    # Last filter chosen via set_filter(); None until one is selected.
    selected_filter = None

    @property
    def is_displayed(self):
        # NOTE(review): always False — navigation cannot verify arrival here.
        return False

    def wait_for_filter_option_to_load(self):
        wait_for(lambda: bool(self.filter_dropdown.items), delay=5, num_sec=60)

    def wait_for_results_to_load(self):
        # The header starts with the result count, e.g. "12 results ...".
        wait_for(lambda: bool(int(self.filter_result_header.text.split()[0])),
                 delay=5, num_sec=60)

    def apply_filter(self):
        self.apply_btn.click()

    def set_filter(self, desired_filter):
        self.selected_filter = desired_filter
        self.filter_dropdown.fill_with(desired_filter)

    def get_random_filter(self):
        return str(random.choice(self.filter_dropdown.items))

    def get_total_results_count(self):
        return int(self.filter_result_header.text.split()[0])
@navigator.register(ContainersProvider, 'AdHoc')
class AdHocMain(CFMENavigateStep):
    """Open Monitoring -> Ad hoc Metrics from the provider's details page."""
    VIEW = AdHocMetricsView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.monitoring.item_select('Ad hoc Metrics')
class ContainerProvidersUtilizationView(View):
    """The Monitoring -> Utilization charts page for a containers provider."""
    title = Text(".//div[@id='main-content']//h1")
    options = View.nested(OptionForm)
    cpu = LineChart(id='miq_chart_parent_candu_0')
    memory = LineChart(id='miq_chart_parent_candu_1')
    network = LineChart(id='miq_chart_parent_candu_2')

    @property
    def is_displayed(self):
        # NOTE(review): always False — navigation cannot verify arrival here.
        return False
@navigator.register(ContainersProvider, 'Utilization')
class Utilization(CFMENavigateStep):
    """Open Monitoring -> Utilization from the provider's details page."""
    VIEW = ContainerProvidersUtilizationView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.monitoring.item_select("Utilization")
class ContainerObjectAllBaseView(ProvidersView):
    """Base class for container object All view.
    SUMMARY_TEXT should be defined in child.
    """
    summary = Text('//div[@id="main-content"]//h1')
    policy = Dropdown('Policy')
    download = Dropdown('Download')
    toolbar = View.nested(ProviderToolBar)

    @property
    def table(self):
        return self.entities.elements

    @property
    def is_displayed(self):
        # We use 'in' for this condition since when we use search it'll include (Names with "...")
        return self.SUMMARY_TEXT in self.summary.text
class ContainerObjectDetailsEntities(View):
    """Summary tables shown on a container object's Details page."""
    properties = SummaryTable(title="Properties")
    status = SummaryTable(title="Status")
    relationships = SummaryTable(title="Relationships")
    overview = SummaryTable(title="Overview")
    smart_management = SummaryTable(title="Smart Management")
    labels = SummaryTable(title="Labels")
class ContainerObjectDetailsBaseView(BaseLoggedInPage, LoggingableView):
    """Base view for a container object's Details (summary) page."""
    title = Text('//div[@id="main-content"]//h1')
    breadcrumb = BreadCrumb(locator='//ol[@class="breadcrumb"]')
    toolbar = View.nested(ProviderDetailsToolBar)
    entities = View.nested(ContainerObjectDetailsEntities)
    # Status boxes / summary table present on some detail pages.
    containers = StatusBox('Containers')
    services = StatusBox('Services')
    images = StatusBox('Images')
    pods = ContainerSummaryTable(title='Pods')

    @View.nested
    class sidebar(ProviderSideBar):  # noqa
        @View.nested
        class properties(Accordion):  # noqa
            tree = ManageIQTree()

        @View.nested
        class relationships(Accordion):  # noqa
            tree = ManageIQTree()

    @property
    def is_displayed(self):
        return (
            self.title.is_displayed and
            self.breadcrumb.is_displayed and
            # We use 'in' for this condition because when we use search the
            # text will include (Names with "...")
            '{} (Summary)'.format(self.context['object'].name) in self.breadcrumb.active_location
        )
# Common methods:
class ContainersTestItem(object):
    """Generic parametrization holder for container tests.

    Bundles the container class under test with its polarion test case ID
    plus any extra attributes an individual test needs.
    """

    __test__ = False  # keep pytest from collecting this helper as a test class

    def __init__(self, obj, polarion_id, **additional_attrs):
        """
        Args:
            obj: the container object under test (e.g. Image)
            polarion_id: the polarion test case ID
            additional_attrs: arbitrary extra attributes stored on the item
        """
        self.obj = obj
        self.polarion_id = polarion_id
        for attr_name, attr_value in additional_attrs.items():
            setattr(self, attr_name, attr_value)

    def pretty_id(self):
        """Return a readable '<object name> (<polarion id>)' identifier."""
        obj_name = getattr(self.obj, '__name__', str(self.obj))
        return '{} ({})'.format(obj_name, self.polarion_id)

    @classmethod
    def get_pretty_id(cls, obj):
        """Recursively dig a ContainersTestItem out of (possibly nested) markers.

        Since the test object is sometimes wrapped within markers, it can be
        hard to locate inside the args tree; this walks marker ``args``
        recursively until an instance is found.

        Args:
            obj: either a ContainersTestItem or a marker wrapping one

        Returns:
            str pretty id, or None when no item could be found
        """
        if isinstance(obj, cls):
            return obj.pretty_id()
        if hasattr(obj, 'args') and hasattr(obj, '__iter__'):
            for candidate in obj.args:
                found = cls.get_pretty_id(candidate)
                if found:
                    return found
class LoadDetailsMixin(object):
    """Embed load details functionality for objects -
    required for some classes like PolicyProfileAssignable"""

    def load_details(self, refresh=False):
        """Navigate to the object's Details page.

        Args:
            refresh: when True, reload the browser page after navigating
                (forces a fresh render if we were already on the page).
        """
        view = navigate_to(self, 'Details')
        if refresh:
            view.browser.refresh()
class Labelable(object):
    """Provide the functionality to set labels"""

    # Both the name and the value of a label must be alphanumeric plus '_'/'.'.
    _LABEL_NAMEVAL_PATTERN = re.compile(r'^[A-Za-z0-9_.]+$')

    def get_labels(self):
        """List labels via the provider's management (wrapanapi) object."""
        return self.mgmt.list_labels()

    def set_label(self, name, value):
        """Sets a label to the object instance

        Args:
            :var name: the name of the label
            :var value: the value of the label

        Returns:
            self.mgmt.set_label return value.
        """
        # Validate both parts up front so bad labels fail fast and loudly.
        assert self._LABEL_NAMEVAL_PATTERN.match(name), \
            'name part ({}) must match the regex pattern {}'.format(
                name, self._LABEL_NAMEVAL_PATTERN.pattern)
        assert self._LABEL_NAMEVAL_PATTERN.match(value), \
            'value part ({}) must match the regex pattern {}'.format(
                value, self._LABEL_NAMEVAL_PATTERN.pattern)
        return self.mgmt.set_label(name, value)

    def remove_label(self, name, silent_failure=False):
        """Remove label by name.

        Args:
            name: name of label
            silent_failure: whether to raise an error or not in case of failure.

        Returns: ``bool`` pass or fail
        Raises:
            :py:class:`LabelNotFoundException`.
        """
        try:
            self.mgmt.delete_label(name)
            return True
        except Exception:  # TODO: add appropriate exception in wrapanapi
            failure_signature = format_exc()
            if silent_failure:
                # Best-effort mode: log the traceback and report failure.
                logger.warning(failure_signature)
                return False
            raise exceptions.LabelNotFoundException(failure_signature)
def navigate_and_get_rows(provider, obj, count, silent_failure=False):
    """Return up to ``count`` random rows from the ``obj`` list table.

    If ``count`` is greater than the number of rows, all rows are returned
    (in random order).

    Args:
        provider: containers provider (kept for call-site compatibility)
        obj: the containers object whose 'All' page holds the table
        count: number of random rows to return
        silent_failure: if True and no records were found for ``obj``,
            return an empty list instead of proceeding (and likely failing)

    Returns:
        list of randomly sampled table rows (possibly empty)
    """
    view = navigate_to(obj, 'All')
    view.toolbar.view_selector.list_button.click()
    # NOTE: the original used `if filter(...)`, which is always truthy on
    # Python 3 because filter() returns a lazy iterator; any() behaves the
    # same on Python 2 and actually evaluates the condition on Python 3.
    no_records = any('No Records Found.' in msg.text for msg in view.flash.messages)
    if no_records and silent_failure:
        return []
    view.paginator.set_items_per_page(1000)
    rows = list(view.table.rows())
    if not rows:
        return []
    return sample(rows, min(count, len(rows)))
def refresh_and_navigate(*args, **kwargs):
    """Navigate somewhere and reload the resulting page.

    Refreshing the page and navigate - we need this for cases that we already
    in the page and want to reload it (plain navigate_to() would be a no-op).
    All arguments are forwarded to navigate_to(); returns the refreshed view.
    """
    view = navigate_to(*args, **kwargs)
    view.browser.refresh()
    return view
class GetRandomInstancesMixin(object):
    """Mixin adding random sampling over an object's ``all()`` listing."""

    def get_random_instances(self, count=1):
        """Return up to ``count`` randomly chosen instances of the object."""
        instances = self.all()
        sample_size = min(count, len(instances))
        return random.sample(instances, sample_size)
| gpl-2.0 |
juliebehr/gaff2xml | openmoltools/amber_parser.py | 3 | 22162 | #!/usr/bin/env python
import sys
import math
import simtk.openmm.app.element as element
import simtk.unit as unit
import subprocess
import datetime
from six.moves import cStringIO
import mdtraj as md
import logging
logger = logging.getLogger(__name__)
def fix(atomClass):
    """Map Amber's wildcard atom class 'X' to the empty string OpenMM expects."""
    return '' if atomClass == 'X' else atomClass
# Map atomic number -> OpenMM Element, preferring the lightest entry when
# several elements share an atomic number.
elements = {}
for elem in element.Element._elements_by_symbol.values():
    num = elem.atomic_number
    if num not in elements or elem.mass < elements[num].mass:
        elements[num] = elem

# Section markers used while parsing Amber .lib files.
OTHER = 0
ATOMS = 1
CONNECT = 2
CONNECTIVITY = 3
RESIDUECONNECT = 4
section = OTHER  # module-level default parser state

# Amber's standard 1-4 nonbonded interaction scaling factors.
charge14scale = 1.0 / 1.2
epsilon14scale = 0.5

skipResidues = ['CIO', 'IB']  # "Generic" ions defined by Amber, which are identical to other real ions
skipClasses = ['OW', 'HW']  # Skip water atoms, since we define these in separate files
class AmberParser(object):
    def __init__(self, override_mol2_residue_name=None):
        """Create an AmberParser object for converting amber force field files to XML format.

        Parameters
        ----------
        override_mol2_residue_name : str, default=None
            If given, use this name to override mol2 residue names.
            Useful to ensure that multiple ligands have unique residue
            names, as required by the OpenMM ffXML parser.
        """
        self.override_mol2_residue_name = override_mol2_residue_name
        self.current_mol2 = 0  # running counter used to uniquify mol2 residue names

        # Per-residue data, keyed by residue name.
        self.residueAtoms = {}        # residue -> [[atomName, type_id], ...]
        self.residueBonds = {}        # residue -> [(atom1, atom2), ...]
        self.residueConnections = {}  # residue -> [atom_index, ...] external bonds

        # Global atom-type tables, indexed by numeric type id.
        self.types = []       # (atomClass, element, charge) per type
        self.type_names = []  # XML type name for each entry in self.types

        # Force-field parameter tables filled by the process_* methods.
        self.masses = {}
        self.resAtomTypes = {}
        self.vdwEquivalents = {}
        self.vdw = {}
        self.charge = {}
        self.bonds = []
        self.angles = []
        self.torsions = []
        self.impropers = []

        self.set_provenance()
def addAtom(self, residue, atomName, atomClass, element, charge, use_numeric_types=True):
"""Add an atom to the database of FF data.
Notes
-----
use_numeric_types was not originally present in the OpenMM AMBER
parsers. It was added so that we can have atom types of the form
"RES-X", where RES is the name of the molecule or residue and X
is the atom numbering within that molecule. use_numeric_types is
set to False when processing mol2 files--e.g. for ligands.
"""
if residue is None:
return
type_id = len(self.types)
self.residueAtoms[residue].append([atomName, type_id])
self.types.append((atomClass, element, charge))
if use_numeric_types:
self.type_names.append("%d" % (type_id))
else:
self.type_names.append("%s-%s" % (residue, atomName))
def addBond(self, residue, atom1, atom2):
"""Add a bond to the database of FF data."""
if residue is None:
return
self.residueBonds[residue].append((atom1, atom2))
def addExternalBond(self, residue, atom):
"""Add an external bond to the database of FF data."""
if residue is None:
return
if atom != -1:
self.residueConnections[residue] += [atom]
    def process_mol2_file(self, inputfile):
        """Process an AMBER GAFF-compatible mol2 file.

        Parameters
        ----------
        inputfile : str
            filename of an .mol2 file

        Notes
        -----
        Antechamber is known to produce NONSTANDARD mol2 files. This function
        is designed to work with those nonstandard mol2 files, not
        Tripos standard mol2 files. We are forced to live with the poor
        decisions of our predecessors...
        """
        # mdtraj's mol2 reader returns pandas DataFrames for atoms and bonds.
        atoms, bonds = md.formats.mol2.mol2_to_dataframes(inputfile)
        if self.override_mol2_residue_name is None:
            residue_name = atoms.resName[1]  # To Do: Add check for consistency
        else:
            residue_name = self.override_mol2_residue_name
        # Give each mol2 file a unique numbering to avoid conflicts.
        residue_name = "%s-%d" % (residue_name, self.current_mol2)
        self.current_mol2 += 1
        self.residueAtoms[residue_name] = []
        self.residueBonds[residue_name] = []
        self.residueConnections[residue_name] = []
        for (i0, i1, name, x, y, z, atype, code, resname, charge) in atoms.itertuples(index=True):
            # i0 and i1 are zero-based and one-based indices, respectively
            full_name = residue_name + "_" + name
            element_symbol = md.formats.mol2.gaff_elements[atype]
            e = element.Element.getBySymbol(element_symbol)
            self.addAtom(residue_name, name, atype, e, charge, use_numeric_types=False)  # use_numeric_types set to false to use string-based atom names, rather than numbers
            self.vdwEquivalents[full_name] = atype
        for (id0, id1, bond_type) in bonds.itertuples(False):
            i = id0 - 1  # Subtract 1 for zero based indexing in OpenMM???
            j = id1 - 1  # Subtract 1 for zero based indexing in OpenMM???
            self.addBond(residue_name, i, j)
def process_library_file(self, inputfile):
"""Process an AMBER .lib file.
Parameters
----------
inputfile : str
filename of an .lib file
"""
for line in open(inputfile):
if line.startswith('!entry'):
fields = line.split('.')
residue = fields[1]
if residue in skipResidues:
residue = None
continue
key = fields[3].split()[0]
if key == 'atoms':
section = ATOMS
self.residueAtoms[residue] = []
self.residueBonds[residue] = []
self.residueConnections[residue] = []
elif key == 'connect':
section = CONNECT
elif key == 'connectivity':
section = CONNECTIVITY
elif key == 'residueconnect':
section = RESIDUECONNECT
else:
section = OTHER
elif section == ATOMS:
fields = line.split()
atomName = fields[0][1:-1]
atomClass = fields[1][1:-1]
if fields[6] == '-1':
# Workaround for bug in some Amber files.
if atomClass[0] == 'C':
elem = elements[6]
elif atomClass[0] == 'H':
elem = elements[1]
else:
raise ValueError('Illegal atomic number: ' + line)
else:
elem = elements[int(fields[6])]
self.charge = float(fields[7])
self.addAtom(residue, atomName, atomClass, elem, self.charge)
elif section == CONNECT:
self.addExternalBond(residue, int(line) - 1)
elif section == CONNECTIVITY:
fields = line.split()
self.addBond(residue, int(fields[0]) - 1, int(fields[1]) - 1)
elif section == RESIDUECONNECT:
# Some Amber files have errors in them, incorrectly listing atoms that should not be
# connected in the first two positions. We therefore rely on the "connect" section for
# those, using this block only for other external connections.
for atom in [int(x) - 1 for x in line.split()[2:]]:
self.addExternalBond(residue, atom)
    def process_dat_file(self, inputfile):
        """Process an AMBER .dat file.

        Parameters
        ----------
        inputfile : str
            filename of an .dat file

        Notes
        -----
        Amber .dat parameter files are positional: a fixed sequence of blocks
        separated by blank lines, tracked here by the ``block`` counter.
        Atom classes within a record are fixed-width columns (e.g. columns
        0-1, 3-4, ...), hence the slicing before split().
        """
        block = 0
        continueTorsion = False  # set when a torsion entry continues on the next line
        for line in open(inputfile):
            line = line.strip()
            if block == 0:     # Title
                block += 1
            elif block == 1:     # Mass
                fields = line.split()
                if len(fields) == 0:
                    block += 1
                else:
                    self.masses[fields[0]] = float(fields[1])
            elif block == 2:     # Hydrophilic atoms
                block += 1
            elif block == 3:     # Bonds
                if len(line) == 0:
                    block += 1
                else:
                    fields = line[5:].split()
                    self.bonds.append((line[:2].strip(), line[3:5].strip(), fields[0], fields[1]))
            elif block == 4:     # Angles
                if len(line) == 0:
                    block += 1
                else:
                    fields = line[8:].split()
                    self.angles.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), fields[0], fields[1]))
            elif block == 5:     # Torsions
                if len(line) == 0:
                    block += 1
                else:
                    fields = line[11:].split()
                    periodicity = int(float(fields[3]))
                    if continueTorsion:
                        # Negative periodicity on the previous line means this
                        # line adds another Fourier term to the same torsion.
                        self.torsions[-1] += [float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)]
                    else:
                        self.torsions.append([line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)])
                    continueTorsion = (periodicity < 0)
            elif block == 6:     # Improper torsions
                if len(line) == 0:
                    block += 1
                else:
                    fields = line[11:].split()
                    self.impropers.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), fields[0], fields[1], fields[2]))
            elif block == 7:     # 10-12 hbond potential
                if len(line) == 0:
                    block += 1
            elif block == 8:     # VDW equivalents
                if len(line) == 0:
                    block += 1
                else:
                    fields = line.split()
                    for atom in fields[1:]:
                        self.vdwEquivalents[atom] = fields[0]
            elif block == 9:     # VDW type
                block += 1
                self.vdwType = line.split()[1]
                if self.vdwType not in ['RE', 'AC']:
                    raise ValueError('Nonbonded type (KINDNB) must be RE or AC')
            elif block == 10:     # VDW parameters
                if len(line) == 0:
                    block += 1
                else:
                    fields = line.split()
                    self.vdw[fields[0]] = (fields[1], fields[2])
    def process_frc_file(self, inputfile):
        """Process an AMBER .frc (frcmod) file.

        Parameters
        ----------
        inputfile : str
            filename of an .frc file

        Notes
        -----
        Unlike .dat files, sections here are introduced by a keyword line
        (MASS, BOND, ANGL, DIHE, IMPR, NONB) and terminated by a blank line;
        the first line of the file is skipped (treated as a title).
        """
        block = ''
        continueTorsion = False  # set when a torsion entry continues on the next line
        first = True
        for line in open(inputfile):
            line = line.strip()
            if len(line) == 0 or first:
                # Blank line (or the title line) ends the current section.
                block = None
                first = False
            elif block is None:
                block = line  # this line names the next section
            elif block.startswith('MASS'):
                fields = line.split()
                self.masses[fields[0]] = float(fields[1])
            elif block.startswith('BOND'):
                fields = line[5:].split()
                self.bonds.append((line[:2].strip(), line[3:5].strip(), fields[0], fields[1]))
            elif block.startswith('ANGL'):
                fields = line[8:].split()
                self.angles.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), fields[0], fields[1]))
            elif block.startswith('DIHE'):
                fields = line[11:].split()
                periodicity = int(float(fields[3]))
                if continueTorsion:
                    # Negative periodicity on the previous line means this
                    # line adds another Fourier term to the same torsion.
                    self.torsions[-1] += [float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)]
                else:
                    self.torsions.append([line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)])
                continueTorsion = (periodicity < 0)
            elif block.startswith('IMPR'):
                fields = line[11:].split()
                self.impropers.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), fields[0], fields[1], fields[2]))
            elif block.startswith('NONB'):
                fields = line.split()
                self.vdw[fields[0]] = (fields[1], fields[2])
    def generate_xml(self):
        """Return the processed forcefield files as an XML stream.

        Returns
        -------
        stream : cStringIO
            The text of the output XML forcefield data.

        Notes
        -----
        Unit conversions from AMBER to OpenMM conventions happen here:
        lengths Angstrom -> nm (x 0.1), energies kcal/mol -> kJ/mol
        (x 4.184), angles degrees -> radians, and harmonic force constants
        pick up a factor of 2 for OpenMM's k*(x-x0)^2 convention.

        The stream can be written to disk via:

        outfile = open("my_forcefield.xml", 'w')
        outfile.write(stream.read())
        outfile.close()
        """
        stream = cStringIO()
        write_stream = lambda x: stream.write(x + "\n")
        write_stream(self.provenance)
        write_stream("<ForceField>")

        # Atom type definitions.
        write_stream(" <AtomTypes>")
        for index, type in enumerate(self.types):
            write_stream(""" <Type name="%s" class="%s" element="%s" mass="%s"/>""" % (self.type_names[index], type[0], type[1].symbol, type[1].mass.value_in_unit(unit.amu)))
        write_stream(" </AtomTypes>")

        # Residue templates: atoms, intra-residue bonds, external bonds.
        write_stream(" <Residues>")
        for res in sorted(self.residueAtoms):
            write_stream(""" <Residue name="%s">""" % res)
            for atom in self.residueAtoms[res]:
                atom_name, type_id = tuple(atom)
                atom_type = self.type_names[type_id]
                write_stream(" <Atom name=\"%s\" type=\"%s\"/>" % (atom_name, atom_type))
            if res in self.residueBonds:
                for bond in self.residueBonds[res]:
                    write_stream(""" <Bond from="%d" to="%d"/>""" % bond)
            if res in self.residueConnections:
                for bond in self.residueConnections[res]:
                    write_stream(""" <ExternalBond from="%d"/>""" % bond)
            write_stream(" </Residue>")
        write_stream(" </Residues>")

        # Harmonic bonds; duplicates and water classes are skipped.
        write_stream(" <HarmonicBondForce>")
        processed = set()
        for bond in self.bonds:
            signature = (bond[0], bond[1])
            if signature in processed:
                continue
            if any([c in skipClasses for c in signature]):
                continue
            processed.add(signature)
            length = float(bond[3]) * 0.1  # Angstrom -> nm
            k = float(bond[2]) * 2 * 100 * 4.184  # kcal/mol/A^2 -> kJ/mol/nm^2, plus factor 2
            write_stream(""" <Bond class1="%s" class2="%s" length="%s" k="%s"/>""" % (bond[0], bond[1], str(length), str(k)))
        write_stream(" </HarmonicBondForce>")

        # Harmonic angles.
        write_stream(" <HarmonicAngleForce>")
        processed = set()
        for angle in self.angles:
            signature = (angle[0], angle[1], angle[2])
            if signature in processed:
                continue
            if any([c in skipClasses for c in signature]):
                continue
            processed.add(signature)
            theta = float(angle[4]) * math.pi / 180.0  # degrees -> radians
            k = float(angle[3]) * 2 * 4.184  # kcal/mol -> kJ/mol, plus factor 2
            write_stream(""" <Angle class1="%s" class2="%s" class3="%s" angle="%s" k="%s"/>""" % (angle[0], angle[1], angle[2], str(theta), str(k)))
        write_stream(" </HarmonicAngleForce>")

        # Proper torsions; reversed() plus the processed set means entries
        # parsed later (e.g. frcmod overrides) win over earlier ones.
        write_stream(" <PeriodicTorsionForce>")
        processed = set()
        for tor in reversed(self.torsions):
            signature = (fix(tor[0]), fix(tor[1]), fix(tor[2]), fix(tor[3]))
            if signature in processed:
                continue
            if any([c in skipClasses for c in signature]):
                continue
            processed.add(signature)
            tag = " <Proper class1=\"%s\" class2=\"%s\" class3=\"%s\" class4=\"%s\"" % signature
            i = 4
            # Each torsion holds [k, phase, periodicity] triples from index 4 on.
            while i < len(tor):
                index = i / 3
                periodicity = int(float(tor[i + 2]))
                phase = float(tor[i + 1]) * math.pi / 180.0
                k = tor[i] * 4.184
                tag += " periodicity%d=\"%d\" phase%d=\"%s\" k%d=\"%s\"" % (index, periodicity, index, str(phase), index, str(k))
                i += 3
            tag += "/>"
            write_stream(tag)
        # Improper torsions; note the reordered signature (central atom first).
        processed = set()
        for tor in reversed(self.impropers):
            signature = (fix(tor[2]), fix(tor[0]), fix(tor[1]), fix(tor[3]))
            if signature in processed:
                continue
            if any([c in skipClasses for c in signature]):
                continue
            processed.add(signature)
            tag = " <Improper class1=\"%s\" class2=\"%s\" class3=\"%s\" class4=\"%s\"" % signature
            i = 4
            while i < len(tor):
                index = i / 3
                periodicity = int(float(tor[i + 2]))
                phase = float(tor[i + 1]) * math.pi / 180.0
                k = float(tor[i]) * 4.184
                tag += " periodicity%d=\"%d\" phase%d=\"%s\" k%d=\"%s\"" % (index, periodicity, index, str(phase), index, str(k))
                i += 3
            tag += "/>"
            write_stream(tag)
        write_stream(" </PeriodicTorsionForce>")

        # Nonbonded parameters (charges + Lennard-Jones).
        write_stream(""" <NonbondedForce coulomb14scale="%g" lj14scale="%s">""" % (charge14scale, epsilon14scale))
        sigmaScale = 0.1 * 2.0 / (2.0 ** (1.0 / 6.0))  # 2*(R/2)/2^(1/6), Angstrom -> nm
        for index, type in enumerate(self.types):
            atomClass = type[0]
            q = type[2]
            if atomClass in self.vdwEquivalents:
                atomClass = self.vdwEquivalents[atomClass]
            if atomClass in self.vdw:
                params = [float(x) for x in self.vdw[atomClass]]
                if self.vdwType == 'RE':
                    # KINDNB 'RE': parameters are (rmin/2, epsilon).
                    sigma = params[0] * sigmaScale
                    epsilon = params[1] * 4.184
                else:
                    # KINDNB 'AC': parameters are the (A, C) LJ coefficients.
                    sigma = (params[0] / params[1]) ** (1.0 / 6.0)
                    epsilon = 4.184 * params[1] * params[1] / (4 * params[0])
            else:
                sigma = 1.0
                epsilon = 0
            if q != 0 or epsilon != 0:
                write_stream(""" <Atom type="%s" charge="%s" sigma="%s" epsilon="%s"/>""" % (self.type_names[index], q, sigma, epsilon))
        write_stream(" </NonbondedForce>")
        write_stream("</ForceField>")
        stream.seek(0)
        return stream
    def parse_filenames(self, filenames):
        """Process a list of filenames according to their filetype suffixes

        Parameters
        ----------
        filenames : list (of strings)
            List of filenames of type (lib, off, dat, or mol2)

        Notes
        -----
        When parameterizing small molecules, the correct order of inputs is:

        $AMBER_LIB_PATH/gaff.dat ligand_name.mol2 ligand_name.frcmod
        """
        for inputfile in filenames:
            # Dispatch on extension; anything unrecognized is assumed to be
            # a .frcmod-style parameter modification file.
            if inputfile.endswith('.lib') or inputfile.endswith('.off'):
                self.process_library_file(inputfile)
            elif inputfile.endswith('.dat'):
                self.process_dat_file(inputfile)
            elif inputfile.endswith("mol2"):
                self.process_mol2_file(inputfile)
            else:
                self.process_frc_file(inputfile)
        self.reduce_atomtypes()
    def reduce_atomtypes(self, symmetrize_protons=False):
        """Reduce the list of atom self.types.

        Parameters
        ----------
        symmetrize_protons : bool, default=False
            if True, multiple hydrogens bound to the same heavy atom
            should all use the same type.

        Notes
        -----
        The default behavior of symmetrize_protons differs from the
        original OpenMM version of this script. For arbitrary small
        molecules, we can not assume symmetric protons.
        """
        removeType = [False] * len(self.types)
        for res in self.residueAtoms:
            if res not in self.residueBonds:
                continue
            # Build an adjacency list for the residue's bond graph.
            atomBonds = [[] for atom in self.residueAtoms[res]]
            for bond in self.residueBonds[res]:
                atomBonds[bond[0]].append(bond[1])
                atomBonds[bond[1]].append(bond[0])
            if symmetrize_protons is True:
                # Collapse all hydrogens bonded to the same heavy atom onto
                # the type of the first such hydrogen.
                for index, atom in enumerate(self.residueAtoms[res]):
                    hydrogens = [x for x in atomBonds[index] if self.types[self.residueAtoms[res][x][1]][1] == element.hydrogen]
                    for h in hydrogens[1:]:
                        removeType[self.residueAtoms[res][h][1]] = True
                        self.residueAtoms[res][h][1] = self.residueAtoms[res][hydrogens[0]][1]
        # Compact self.types, dropping removed entries and remapping ids.
        newTypes = []
        replaceWithType = [0] * len(self.types)
        for i in range(len(self.types)):
            if not removeType[i]:
                newTypes.append(self.types[i])
            replaceWithType[i] = len(newTypes) - 1
        self.types = newTypes
        for res in self.residueAtoms:
            for atom in self.residueAtoms[res]:
                atom[1] = replaceWithType[atom[1]]
def set_provenance(self):
"""Set the provenance attribute with information about the current python session."""
self.provenance = []
line = """<!-- %s -->\n""" % "Time and parameters of origin:"
self.provenance.append(line)
now = datetime.datetime.now()
line = """<!-- %s -->\n""" % str(now)
self.provenance.append(line)
cmd_string = subprocess.list2cmdline(sys.argv[1:])
cmd_string = cmd_string.replace("-", " ") # Replace XML specific characters that can break some XML parsers
cmd_string = cmd_string.replace(">", " ") #
cmd_string = cmd_string.replace("<", " ") #
line = """<!-- %s -->\n""" % cmd_string
self.provenance.append(line)
self.provenance = "".join(self.provenance)
| gpl-2.0 |
dednal/chromium.src | third_party/android_platform/development/scripts/stack_core.py | 14 | 10711 | #!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""stack symbolizes native crash dumps."""
import re
import symbol
def PrintTraceLines(trace_lines):
  """Print back trace.

  trace_lines is a list of (reladdr, symbol_with_offset, location) tuples;
  the symbol column is padded so FILE:LINE lines up.
  """
  maxlen = max(map(lambda tl: len(tl[1]), trace_lines))
  print
  print "Stack Trace:"
  print " RELADDR " + "FUNCTION".ljust(maxlen) + " FILE:LINE"
  for tl in trace_lines:
    (addr, symbol_with_offset, location) = tl
    print " %8s %s %s" % (addr, symbol_with_offset.ljust(maxlen), location)
  return
def PrintValueLines(value_lines):
  """Print stack data values.

  value_lines is a list of (addr, value, symbol_with_offset, location)
  tuples; the symbol column is padded so FILE:LINE lines up.
  """
  maxlen = max(map(lambda tl: len(tl[2]), value_lines))
  print
  print "Stack Data:"
  print " ADDR VALUE " + "FUNCTION".ljust(maxlen) + " FILE:LINE"
  for vl in value_lines:
    (addr, value, symbol_with_offset, location) = vl
    print " %8s %8s %s %s" % (addr, value, symbol_with_offset.ljust(maxlen), location)
  return
# Special "area" names used for addresses that do not fall inside a mapped
# library.
UNKNOWN = "<unknown>"
HEAP = "[heap]"
STACK = "[stack]"
def PrintOutput(trace_lines, value_lines, more_info):
  """Print the collected trace lines and, when more_info is set, value lines."""
  if trace_lines:
    PrintTraceLines(trace_lines)
  if value_lines:
    # TODO(cjhopman): it seems that symbol.SymbolInformation always fails to
    # find information for addresses in value_lines in chrome libraries, and so
    # value_lines have little value to us and merely clutter the output.
    # Since information is sometimes contained in these lines (from system
    # libraries), don't completely disable them.
    if more_info:
      PrintValueLines(value_lines)
def PrintDivider():
  """Print a separator between two crash dumps found in the same input."""
  print
  print "-----------------------------------------------------\n"
def ConvertTrace(lines, more_info):
  """Convert strings containing native crash to a stack.

  Args:
    lines: iterable of raw log/tombstone lines (decoded leniently below).
    more_info: when True, also emit the 'Stack Data' value lines.
  """
  process_info_line = re.compile("(pid: [0-9]+, tid: [0-9]+.*)")
  signal_line = re.compile("(signal [0-9]+ \(.*\).*)")
  register_line = re.compile("(([ ]*[0-9a-z]{2} [0-9a-f]{8}){4})")
  thread_line = re.compile("(.*)(\-\-\- ){15}\-\-\-")
  dalvik_jni_thread_line = re.compile("(\".*\" prio=[0-9]+ tid=[0-9]+ NATIVE.*)")
  dalvik_native_thread_line = re.compile("(\".*\" sysTid=[0-9]+ nice=[0-9]+.*)")
  # Hex field width depends on the target architecture (32- vs 64-bit).
  width = "{8}"
  if symbol.ARCH == "arm64" or symbol.ARCH == "x86_64" or symbol.ARCH == "x64":
    width = "{16}"
  # Matches LOG(FATAL) lines, like the following example:
  #   [FATAL:source_file.cc(33)] Check failed: !instances_.empty()
  log_fatal_line = re.compile("(\[FATAL\:.*\].*)$")
  # Note that both trace and value line matching allow for variable amounts of
  # whitespace (e.g. \t). This is because the we want to allow for the stack
  # tool to operate on AndroidFeedback provided system logs. AndroidFeedback
  # strips out double spaces that are found in tombstone files and logcat output.
  #
  # Examples of matched trace lines include lines from tombstone files like:
  #   #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
  #   #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so (symbol)
  # Or lines from AndroidFeedback crash report system logs like:
  #   03-25 00:51:05.520 I/DEBUG ( 65): #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
  # Please note the spacing differences.
  trace_line = re.compile("(.*)\#(?P<frame>[0-9]+)[ \t]+(..)[ \t]+(0x)?(?P<address>[0-9a-f]{0,16})[ \t]+(?P<lib>[^\r\n \t]*)(?P<symbol_present> \((?P<symbol_name>.*)\))?")  # pylint: disable-msg=C6310
  # Matches lines emitted by src/base/debug/stack_trace_android.cc, like:
  #   #00 0x7324d92d /data/app-lib/org.chromium.native_test-1/libbase.cr.so+0x0006992d
  # This pattern includes the unused named capture groups <symbol_present> and
  # <symbol_name> so that it can interoperate with the |trace_line| regex.
  debug_trace_line = re.compile(
      '(.*)(?P<frame>\#[0-9]+ 0x[0-9a-f]' + width + ') '
      '(?P<lib>[^+]+)\+0x(?P<address>[0-9a-f]' + width + ')'
      '(?P<symbol_present>)(?P<symbol_name>)')
  # Examples of matched value lines include:
  #   bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
  #   bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so (symbol)
  #   03-25 00:51:05.530 I/DEBUG ( 65): bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
  # Again, note the spacing differences.
  value_line = re.compile("(.*)([0-9a-f]" + width + ")[ \t]+([0-9a-f]" + width + ")[ \t]+([^\r\n \t]*)( \((.*)\))?")
  # Lines from 'code around' sections of the output will be matched before
  # value lines because otherwise the 'code around' sections will be confused as
  # value lines.
  #
  # Examples include:
  #   801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
  #   03-25 00:51:05.530 I/DEBUG ( 65): 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
  code_line = re.compile("(.*)[ \t]*[a-f0-9]" + width +
                         "[ \t]*[a-f0-9]" + width +
                         "[ \t]*[a-f0-9]" + width +
                         "[ \t]*[a-f0-9]" + width +
                         "[ \t]*[a-f0-9]" + width +
                         "[ \t]*[ \r\n]")  # pylint: disable-msg=C6310
  trace_lines = []
  value_lines = []
  last_frame = -1
  # It is faster to get symbol information with a single call rather than with
  # separate calls for each line. Since symbol.SymbolInformation caches results,
  # we can extract all the addresses that we will want symbol information for
  # from the log and call symbol.SymbolInformation so that the results are
  # cached in the following lookups.
  code_addresses = {}
  for ln in lines:
    line = unicode(ln, errors='ignore')
    lib, address = None, None
    match = trace_line.match(line) or debug_trace_line.match(line)
    if match:
      address, lib = match.group('address', 'lib')
    match = value_line.match(line)
    if match and not code_line.match(line):
      (_0, _1, address, lib, _2, _3) = match.groups()
    if lib:
      code_addresses.setdefault(lib, set()).add(address)
  # Warm the symbol cache with one bulk lookup per library.
  for lib in code_addresses:
    symbol.SymbolInformationForSet(
        symbol.TranslateLibPath(lib), code_addresses[lib], more_info)
  for ln in lines:
    # AndroidFeedback adds zero width spaces into its crash reports. These
    # should be removed or the regular expresssions will fail to match.
    line = unicode(ln, errors='ignore')
    process_header = process_info_line.search(line)
    signal_header = signal_line.search(line)
    register_header = register_line.search(line)
    thread_header = thread_line.search(line)
    dalvik_jni_thread_header = dalvik_jni_thread_line.search(line)
    dalvik_native_thread_header = dalvik_native_thread_line.search(line)
    log_fatal_header = log_fatal_line.search(line)
    if (process_header or signal_header or register_header or thread_header or
        dalvik_jni_thread_header or dalvik_native_thread_header or
        log_fatal_header) :
      # A header marks the start of a new crash section: flush what we have.
      if trace_lines or value_lines:
        PrintOutput(trace_lines, value_lines, more_info)
        PrintDivider()
        trace_lines = []
        value_lines = []
        last_frame = -1
      if process_header:
        print process_header.group(1)
      if signal_header:
        print signal_header.group(1)
      if register_header:
        print register_header.group(1)
      if thread_header:
        print thread_header.group(1)
      if dalvik_jni_thread_header:
        print dalvik_jni_thread_header.group(1)
      if dalvik_native_thread_header:
        print dalvik_native_thread_header.group(1)
      if log_fatal_header:
        print log_fatal_header.group(1)
      continue
    match = trace_line.match(line) or debug_trace_line.match(line)
    if match:
      frame, code_addr, area, symbol_present, symbol_name = match.group(
          'frame', 'address', 'lib', 'symbol_present', 'symbol_name')
      # NOTE(review): 'frame' is a string here while last_frame starts as
      # int -1; this comparison relies on Python 2 mixed-type ordering.
      if frame <= last_frame and (trace_lines or value_lines):
        # Frame numbering restarted: a new stack begins, flush the old one.
        PrintOutput(trace_lines, value_lines, more_info)
        PrintDivider()
        trace_lines = []
        value_lines = []
      last_frame = frame
      if area == UNKNOWN or area == HEAP or area == STACK:
        trace_lines.append((code_addr, "", area))
      else:
        # If a calls b which further calls c and c is inlined to b, we want to
        # display "a -> b -> c" in the stack trace instead of just "a -> c"
        info = symbol.SymbolInformation(area, code_addr, more_info)
        nest_count = len(info) - 1
        for (source_symbol, source_location, object_symbol_with_offset) in info:
          if not source_symbol:
            if symbol_present:
              source_symbol = symbol.CallCppFilt(symbol_name)
            else:
              source_symbol = UNKNOWN
          if not source_location:
            source_location = area
          if nest_count > 0:
            # Inlined frame: mark it with an arrow instead of an address.
            nest_count = nest_count - 1
            trace_lines.append(("v------>", source_symbol, source_location))
          else:
            if not object_symbol_with_offset:
              object_symbol_with_offset = source_symbol
            trace_lines.append((code_addr,
                                object_symbol_with_offset,
                                source_location))
    if code_line.match(line):
      # Code lines should be ignored. If this were excluded the 'code around'
      # sections would trigger value_line matches.
      continue;
    match = value_line.match(line)
    if match:
      (unused_, addr, value, area, symbol_present, symbol_name) = match.groups()
      if area == UNKNOWN or area == HEAP or area == STACK or not area:
        value_lines.append((addr, value, "", area))
      else:
        info = symbol.SymbolInformation(area, value, more_info)
        (source_symbol, source_location, object_symbol_with_offset) = info.pop()
        if not source_symbol:
          if symbol_present:
            source_symbol = symbol.CallCppFilt(symbol_name)
          else:
            source_symbol = UNKNOWN
        if not source_location:
          source_location = area
        if not object_symbol_with_offset:
          object_symbol_with_offset = source_symbol
        value_lines.append((addr,
                            value,
                            object_symbol_with_offset,
                            source_location))
  # Flush whatever remains after the last line.
  PrintOutput(trace_lines, value_lines, more_info)
| bsd-3-clause |
srossross/uvio | uvio/_version.py | 1 | 15758 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git keywords that `git archive` substitutes at export time."""
    # These strings will be replaced by git during git-archive (export-subst).
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own; _version.py just calls this.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Plain attribute container for versioneer settings."""
    pass


def get_config():
    """Return the static configuration that 'setup.py versioneer' baked
    into this _version.py when it was generated."""
    cfg = VersioneerConfig()
    for attr, value in (("VCS", "git"),
                        ("style", "pep440"),
                        ("tag_prefix", ""),
                        ("parentdir_prefix", "None"),
                        ("versionfile_source", "uvio/_version.py"),
                        ("verbose", False)):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a particular version-discovery strategy does not apply,
    so the caller should fall through to the next one."""
    pass
# Empty here; presumably populated by setup-time versioneer machinery.
# Unused inside this module as far as visible — TODO confirm.
LONG_VERSION_PY = {}
# Maps vcs name -> {method name -> handler function}; filled in by the
# register_vcs_handler decorator below.
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that files ``f`` under HANDLERS[vcs][method]."""
    def decorate(f):
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each executable name in *commands* until one launches.

    Returns the child's stripped stdout (decoded on Python 3) on success,
    or None when no executable could be found or the process exited
    non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([command] + args, cwd=cwd,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found under this name: try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None
        break
    else:
        # Loop exhausted without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive a version dict from the name of the source directory.

    Source tarballs conventionally unpack into "<project>-<version>"
    directories; strip the configured prefix and treat the remainder as
    the version.  Raises NotThisMethod when the directory name does not
    carry the expected prefix.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the git_refnames/git_full keyword values from a
    _version.py file by regexp.

    Used from setup.py, which must not import _version.py directly;
    this function is not used from within _version.py itself.  I/O
    errors are silently ignored and yield a (possibly empty) dict.
    """
    keywords = {}
    try:
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                elif stripped.startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Build a version dict from expanded git-archive keywords.

    Raises NotThisMethod when the keywords are absent or unexpanded
    (i.e. we are not inside a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {"version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None
                }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Interrogate a git checkout to build the version "pieces" dict.

    Returns a dict with keys "long", "short", "error", "dirty",
    "closest-tag" and "distance" (the latter two only when parsing
    succeeds).  Raises NotThisMethod when *root* is not a git checkout
    or the git commands fail.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later (from describe)
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags; fall back to counting all commits for the distance
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return "." if the closest tag already contains a PEP 440 local
    version separator ("+"), else "+".

    Fix: ``pieces.get("closest-tag", "")`` only covers a *missing* key;
    elsewhere in this file "closest-tag" is explicitly set to None when
    there are no tags, which made ``"+" in None`` raise TypeError.
    Coalesce None to "" so the function is safe for all pieces dicts.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 with a post-release
    "local version identifier").

    Note that if you get a tagged build and then dirty it, you'll get
    TAG+0.gHEX.dirty.  With no tags at all (git_describe was just HEX)
    the result is 0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # no-tags exception
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] -- no -dirty marker.

    With no tags at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # no-tags exception
        return "0.post.dev%d" % pieces["distance"]
    suffix = ".post.dev%d" % pieces["distance"] if pieces["distance"] else ""
    return tag + suffix
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX]; ".dev0" marks a dirty tree.

    Note that .dev0 sorts backwards (a dirty tree will appear "older"
    than the corresponding clean one), but you shouldn't be releasing
    software with -dirty anyways.  With no tags at all the result is
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # no-tags exception
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; ".dev0" means dirty.

    With no tags at all the result is 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
        return rendered
    # no-tags exception
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Mimic 'git describe --tags --dirty --always':
    TAG[-DISTANCE-gHEX][-dirty], or, with no tags, HEX[-dirty]
    (note: no 'g' prefix in that case)."""
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # no-tags exception
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Mimic 'git describe --tags --dirty --always --long':
    TAG-DISTANCE-gHEX[-dirty], where the distance/hash part is
    unconditional; with no tags, HEX[-dirty] (no 'g' prefix)."""
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # no-tags exception
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Dispatch *pieces* to the renderer selected by *style*.

    Short-circuits to an "unknown" version dict when the pieces carry an
    error; an empty or "default" style means "pep440".  Raises
    ValueError for unrecognized styles.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Return the version dict, trying each discovery strategy in turn:
    expanded git-archive keywords, then 'git describe' on a checkout,
    then the parent-directory name, and finally "0+unknown"."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ missing (frozen interpreter); can't locate the tree.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Every strategy failed; report an explicit error in the dict.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| mit |
villalonreina/dipy | dipy/tracking/local/tests/test_tissue_classifier.py | 13 | 5734 |
import numpy as np
import numpy.testing as npt
import scipy.ndimage
from dipy.core.ndindex import ndindex
from dipy.tracking.local import (BinaryTissueClassifier,
ThresholdTissueClassifier,
ActTissueClassifier)
from dipy.tracking.local.localtracking import TissueTypes
def test_binary_tissue_classifier():
    """This tests that the binary tissue classifier returns expected
    tissue types.
    """
    mask = np.random.random((4, 4, 4))
    mask[mask < 0.4] = 0.0
    # Build one classifier from a boolean mask and one from the raw float
    # mask; both are expected to classify identically (non-zero == inside).
    btc_boolean = BinaryTissueClassifier(mask > 0)
    btc_float64 = BinaryTissueClassifier(mask)
    # test voxel center
    for ind in ndindex(mask.shape):
        pts = np.array(ind, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        if mask[ind] > 0:
            npt.assert_equal(state_boolean, TissueTypes.TRACKPOINT)
            npt.assert_equal(state_float64, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state_boolean, TissueTypes.ENDPOINT)
            npt.assert_equal(state_float64, TissueTypes.ENDPOINT)
    # test random points in voxel
    for ind in ndindex(mask.shape):
        for _ in range(50):
            # jitter by up to half a voxel so ind stays the nearest voxel
            pts = np.array(ind, dtype='float64') + np.random.random(3) - 0.5
            state_boolean = btc_boolean.check_point(pts)
            state_float64 = btc_float64.check_point(pts)
            if mask[ind] > 0:
                npt.assert_equal(state_boolean, TissueTypes.TRACKPOINT)
                npt.assert_equal(state_float64, TissueTypes.TRACKPOINT)
            else:
                npt.assert_equal(state_boolean, TissueTypes.ENDPOINT)
                npt.assert_equal(state_float64, TissueTypes.ENDPOINT)
    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        npt.assert_equal(state_boolean, TissueTypes.OUTSIDEIMAGE)
        npt.assert_equal(state_float64, TissueTypes.OUTSIDEIMAGE)
def test_threshold_tissue_classifier():
    """This tests that the threshold tissue classifier returns expected
    tissue types.
    """
    tissue_map = np.random.random((4, 4, 4))
    ttc = ThresholdTissueClassifier(tissue_map.astype('float32'), 0.5)
    # test voxel center
    for ind in ndindex(tissue_map.shape):
        pts = np.array(ind, dtype='float64')
        state = ttc.check_point(pts)
        if tissue_map[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
    # test random points in voxel
    inds = [[0, 1.4, 2.2], [0, 2.3, 2.3], [0, 2.2, 1.3], [0, 0.9, 2.2],
            [0, 2.8, 1.1], [0, 1.1, 3.3], [0, 2.1, 1.9], [0, 3.1, 3.1],
            [0, 0.1, 0.1], [0, 0.9, 0.5], [0, 0.9, 0.5], [0, 2.9, 0.1]]
    for pts in inds:
        pts = np.array(pts, dtype='float64')
        state = ttc.check_point(pts)
        # Reference value: linear (order=1) interpolation of the map at pts.
        res = scipy.ndimage.map_coordinates(
            tissue_map, np.reshape(pts, (3, 1)), order=1, mode='nearest')
        if res > 0.5:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state = ttc.check_point(pts)
        npt.assert_equal(state, TissueTypes.OUTSIDEIMAGE)
def test_act_tissue_classifier():
    """This tests that the act tissue classifier returns expected
    tissue types.
    """
    gm = np.random.random((4, 4, 4))
    wm = np.random.random((4, 4, 4))
    csf = np.random.random((4, 4, 4))
    # Normalize so the three partial-volume maps sum to 1 in every voxel.
    tissue_sum = gm + wm + csf
    gm /= tissue_sum
    wm /= tissue_sum
    csf /= tissue_sum
    act_tc = ActTissueClassifier(include_map=gm, exclude_map=csf)
    # test voxel center
    for ind in ndindex(wm.shape):
        pts = np.array(ind, dtype='float64')
        state = act_tc.check_point(pts)
        # exclude (csf) dominates -> invalid; include (gm) dominates ->
        # endpoint; otherwise keep tracking.
        if csf[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
    # test random points in voxel
    inds = [[0, 1.4, 2.2], [0, 2.3, 2.3], [0, 2.2, 1.3], [0, 0.9, 2.2],
            [0, 2.8, 1.1], [0, 1.1, 3.3], [0, 2.1, 1.9], [0, 3.1, 3.1],
            [0, 0.1, 0.1], [0, 0.9, 0.5], [0, 0.9, 0.5], [0, 2.9, 0.1]]
    for pts in inds:
        pts = np.array(pts, dtype='float64')
        state = act_tc.check_point(pts)
        # Reference values: linear (order=1) interpolation of both maps.
        gm_res = scipy.ndimage.map_coordinates(
            gm, np.reshape(pts, (3, 1)), order=1, mode='nearest')
        csf_res = scipy.ndimage.map_coordinates(
            csf, np.reshape(pts, (3, 1)), order=1, mode='nearest')
        if csf_res > 0.5:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm_res > 0.5:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state = act_tc.check_point(pts)
        npt.assert_equal(state, TissueTypes.OUTSIDEIMAGE)
if __name__ == '__main__':
    # Fix: ``run_module_suite`` was called without ever being imported,
    # so running this file directly raised NameError.  Call it through
    # numpy.testing, which IS imported above as ``npt``.
    npt.run_module_suite()
| bsd-3-clause |
bcorbet/SickRage | lib/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
    """Charset prober for the CP949 encoding.

    Reuses the EUC-KR distribution analysis with a CP949-specific coding
    state machine (see NOTE below).
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
        # not different.
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Encoding name reported to callers when this prober wins.
        return "CP949"
| gpl-3.0 |
erjohnso/libcloud | libcloud/compute/drivers/dimensiondata.py | 4 | 188456 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dimension Data Driver
"""
from libcloud.utils.py3 import ET
from libcloud.common.dimensiondata import LooseVersion
from libcloud.common.exceptions import BaseHTTPError
from libcloud.compute.base import NodeDriver, Node, NodeAuthPassword
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
from libcloud.common.dimensiondata import dd_object_to_id
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.common.dimensiondata import (DimensionDataConnection,
DimensionDataStatus)
from libcloud.common.dimensiondata import DimensionDataNetwork
from libcloud.common.dimensiondata import DimensionDataNetworkDomain
from libcloud.common.dimensiondata import DimensionDataVlan
from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification
from libcloud.common.dimensiondata import DimensionDataServerDisk
from libcloud.common.dimensiondata import DimensionDataServerVMWareTools
from libcloud.common.dimensiondata import DimensionDataPublicIpBlock
from libcloud.common.dimensiondata import DimensionDataFirewallRule
from libcloud.common.dimensiondata import DimensionDataFirewallAddress
from libcloud.common.dimensiondata import DimensionDataNatRule
from libcloud.common.dimensiondata import DimensionDataAntiAffinityRule
from libcloud.common.dimensiondata import DimensionDataIpAddressList
from libcloud.common.dimensiondata import DimensionDataChildIpAddressList
from libcloud.common.dimensiondata import DimensionDataIpAddress
from libcloud.common.dimensiondata import DimensionDataPortList
from libcloud.common.dimensiondata import DimensionDataPort
from libcloud.common.dimensiondata import DimensionDataChildPortList
from libcloud.common.dimensiondata import DimensionDataNic
from libcloud.common.dimensiondata import NetworkDomainServicePlan
from libcloud.common.dimensiondata import DimensionDataTagKey
from libcloud.common.dimensiondata import DimensionDataTag
from libcloud.common.dimensiondata import API_ENDPOINTS, DEFAULT_REGION
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.common.dimensiondata import SERVER_NS, NETWORK_NS, GENERAL_NS
from libcloud.utils.py3 import urlencode, ensure_string
from libcloud.utils.xml import fixxpath, findtext, findall
from libcloud.utils.py3 import basestring
from libcloud.compute.types import NodeState, Provider
import sys
# Node state map is a dictionary with the keys as tuples
# These tuples represent:
# (<state_of_node_from_didata>, <is node started?>, <action happening>)
# Note: 'true'/'false' here are the API's string booleans, not Python
# bools, and None in the third slot means "no action in progress".
NODE_STATE_MAP = {
    ('NORMAL', 'false', None):
        NodeState.STOPPED,
    ('PENDING_CHANGE', 'false', None):
        NodeState.PENDING,
    ('PENDING_CHANGE', 'false', 'CHANGE_NETWORK_ADAPTER'):
        NodeState.PENDING,
    ('PENDING_CHANGE', 'true', 'CHANGE_NETWORK_ADAPTER'):
        NodeState.PENDING,
    ('PENDING_CHANGE', 'false', 'EXCHANGE_NIC_VLANS'):
        NodeState.PENDING,
    ('PENDING_CHANGE', 'true', 'EXCHANGE_NIC_VLANS'):
        NodeState.PENDING,
    ('NORMAL', 'true', None):
        NodeState.RUNNING,
    ('PENDING_CHANGE', 'true', 'START_SERVER'):
        NodeState.STARTING,
    ('PENDING_ADD', 'true', 'DEPLOY_SERVER'):
        NodeState.STARTING,
    ('PENDING_ADD', 'true', 'DEPLOY_SERVER_WITH_DISK_SPEED'):
        NodeState.STARTING,
    ('PENDING_CHANGE', 'true', 'SHUTDOWN_SERVER'):
        NodeState.STOPPING,
    ('PENDING_CHANGE', 'true', 'POWER_OFF_SERVER'):
        NodeState.STOPPING,
    ('PENDING_CHANGE', 'true', 'REBOOT_SERVER'):
        NodeState.REBOOTING,
    ('PENDING_CHANGE', 'true', 'RESET_SERVER'):
        NodeState.REBOOTING,
    ('PENDING_CHANGE', 'true', 'RECONFIGURE_SERVER'):
        NodeState.RECONFIGURING,
}
# Maps libcloud object class names to the asset-type identifier strings
# expected by the Dimension Data tagging API.
OBJECT_TO_TAGGING_ASSET_TYPE_MAP = {
    'Node': 'SERVER',
    'NodeImage': 'CUSTOMER_IMAGE',
    'DimensionDataNetworkDomain': 'NETWORK_DOMAIN',
    'DimensionDataVlan': 'VLAN',
    'DimensionDataPublicIpBlock': 'PUBLIC_IP_BLOCK'
}
class DimensionDataNodeDriver(NodeDriver):
    """
    DimensionData node driver.
    Default api_version is used unless specified.
    """
    # Endpoint dict for the chosen region; set in __init__ from
    # API_ENDPOINTS.
    selected_region = None
    connectionCls = DimensionDataConnection
    name = 'DimensionData'
    website = 'http://www.dimensiondata.com/'
    type = Provider.DIMENSIONDATA
    # Node creation supports password-based initial authentication.
    features = {'create_node': ['password']}
    # Class-level default; may be overridden per-instance in __init__.
    api_version = 1.0
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=DEFAULT_REGION, **kwargs):
        """
        Instantiate the driver for a region (or an explicit host).

        :param key: API username
        :param secret: API password
        :param region: key into ``API_ENDPOINTS`` (default
                       ``DEFAULT_REGION``); may be invalid only when
                       ``host`` is supplied explicitly
        :raises ValueError: if ``region`` is unknown and no ``host`` was
                            given
        """
        if region not in API_ENDPOINTS and host is None:
            raise ValueError(
                'Invalid region: %s, no host specified' % (region))
        if region is not None:
            self.selected_region = API_ENDPOINTS[region]
        # Override the class-level api_version BEFORE calling the base
        # initializer: connection construction reads it back through
        # _ex_connection_class_kwargs() below.
        if api_version is not None:
            self.api_version = api_version
        super(DimensionDataNodeDriver, self).__init__(key=key, secret=secret,
                                                      secure=secure, host=host,
                                                      port=port,
                                                      api_version=api_version,
                                                      region=region,
                                                      **kwargs)
def _ex_connection_class_kwargs(self):
"""
Add the region to the kwargs before the connection is instantiated
"""
kwargs = super(DimensionDataNodeDriver,
self)._ex_connection_class_kwargs()
kwargs['region'] = self.selected_region
kwargs['api_version'] = self.api_version
return kwargs
    def _create_node_mcp1(self, name, image, auth, ex_description,
                          ex_network=None,
                          ex_memory_gb=None,
                          ex_cpu_specification=None,
                          ex_is_started=True,
                          ex_primary_dns=None,
                          ex_secondary_dns=None, **kwargs):
        """
        Create a new DimensionData node (MCP1 deployServer request).

        :keyword name: String with a name for this new node (required)
        :type name: ``str``
        :keyword image: OS Image to boot on node. (required)
        :type image: :class:`NodeImage` or ``str``
        :keyword auth: Initial authentication information for the node.
                       (If this is a customer LINUX image auth will be
                       ignored)
        :type auth: :class:`NodeAuthPassword` or ``str`` or ``None``
        :keyword ex_description: description for this node (required)
        :type ex_description: ``str``
        :keyword ex_network: Network to create the node within (required
                             unless using ex_network_domain or
                             ex_primary_ipv4)
        :type ex_network: :class:`DimensionDataNetwork` or ``str``
        :keyword ex_memory_gb: The amount of memory in GB for the server
        :type ex_memory_gb: ``int``
        :keyword ex_cpu_specification: The spec of CPU to deploy
                                       (optional)
        :type ex_cpu_specification:
            :class:`DimensionDataServerCpuSpecification`
        :keyword ex_is_started: Start server after creation? default
                                true (required)
        :type ex_is_started: ``bool``
        :keyword ex_primary_dns: The node's primary DNS
        :type ex_primary_dns: ``str``
        :keyword ex_secondary_dns: The node's secondary DNS
        :type ex_secondary_dns: ``str``
        :return: The newly created :class:`Node`.
        :rtype: :class:`Node`
        """
        password = None
        # Credentials are only processed for images that need them
        # (customer LINUX images carry their own).
        image_needs_auth = self._image_needs_auth(image)
        if image_needs_auth:
            if isinstance(auth, basestring):
                auth_obj = NodeAuthPassword(password=auth)
                password = auth
            else:
                auth_obj = self._get_and_check_auth(auth)
                password = auth_obj.password
        # Assemble the <deployServer> XML request body.
        server_elm = ET.Element('deployServer', {'xmlns': TYPES_URN})
        ET.SubElement(server_elm, "name").text = name
        ET.SubElement(server_elm, "description").text = ex_description
        image_id = self._image_to_image_id(image)
        ET.SubElement(server_elm, "imageId").text = image_id
        ET.SubElement(server_elm, "start").text = str(
            ex_is_started).lower()
        if password is not None:
            ET.SubElement(server_elm,
                          "administratorPassword").text = password
        if ex_cpu_specification is not None:
            cpu = ET.SubElement(server_elm, "cpu")
            cpu.set('speed', ex_cpu_specification.performance)
            cpu.set('count', str(ex_cpu_specification.cpu_count))
            cpu.set('coresPerSocket',
                    str(ex_cpu_specification.cores_per_socket))
        if ex_memory_gb is not None:
            ET.SubElement(server_elm, "memoryGb").text = str(ex_memory_gb)
        if ex_network is not None:
            network_elm = ET.SubElement(server_elm, "network")
            network_id = self._network_to_network_id(ex_network)
            ET.SubElement(network_elm, "networkId").text = network_id
        if ex_primary_dns:
            dns_elm = ET.SubElement(server_elm, "primaryDns")
            dns_elm.text = ex_primary_dns
        if ex_secondary_dns:
            dns_elm = ET.SubElement(server_elm, "secondaryDns")
            dns_elm.text = ex_secondary_dns
        response = self.connection.request_with_orgId_api_2(
            'server/deployServer',
            method='POST',
            data=ET.tostring(server_elm)).object
        # The new server's id comes back as an <info name="serverId"> entry.
        node_id = None
        for info in findall(response, 'info', TYPES_URN):
            if info.get('name') == 'serverId':
                node_id = info.get('value')
        node = self.ex_get_node_by_id(node_id)
        # Surface a generated password to the caller via node.extra.
        if image_needs_auth:
            if getattr(auth_obj, "generated", False):
                node.extra['password'] = auth_obj.password
        return node
    def create_node(self, name,
                    image,
                    auth,
                    ex_network_domain=None,
                    ex_primary_nic_private_ipv4=None,
                    ex_primary_nic_vlan=None,
                    ex_primary_nic_network_adapter=None,
                    ex_additional_nics=None,
                    ex_description=None,
                    ex_disks=None,
                    ex_cpu_specification=None,
                    ex_memory_gb=None,
                    ex_is_started=True,
                    ex_primary_dns=None,
                    ex_secondary_dns=None,
                    ex_ipv4_gateway=None,
                    ex_microsoft_time_zone=None,
                    **kwargs
                    ):
        """
        Create a new DimensionData node in MCP2. However, it is still
        backward compatible for MCP1 for a limited time. Please consider
        using MCP2 datacenter as MCP1 will phase out soon.

        Legacy Create Node for MCP1 datacenter

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.base import NodeAuthPassword
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = False
        >>> DimensionData = get_driver(Provider.DIMENSIONDATA)
        >>> driver = DimensionData('myusername', 'mypassword', region='dd-au')
        >>>
        >>> # Password
        >>> root_pw = NodeAuthPassword('password123')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU1')
        >>>
        >>> # Get network by location
        >>> my_network = driver.list_networks(location=location)[0]
        >>> pprint(my_network)
        >>>
        >>> # Get Image
        >>> images = driver.list_images(location=location)
        >>> image = images[0]
        >>>
        >>> node = driver.create_node(name='test_blah_2', image=image,
        >>>                           auth=root_pw,
        >>>                           ex_description='test3 node',
        >>>                           ex_network=my_network,
        >>>                           ex_is_started=False)
        >>> pprint(node)

        Create Node in MCP2 Data Center

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.base import NodeAuthPassword
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername', 'mypassword', region='dd-au')
        >>>
        >>> # Password
        >>> root_pw = NodeAuthPassword('password123')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
                              networkDomainName][0]
        >>>
        >>> vlan = driver.ex_list_vlans(location=location,
        >>>                             network_domain=my_network_domain)[0]
        >>> pprint(vlan)
        >>>
        >>> # Get Image
        >>> images = driver.list_images(location=location)
        >>> image = images[0]
        >>>
        >>> # Create node using vlan instead of private IPv4
        >>> node = driver.create_node(name='test_server_01', image=image,
        >>>                           auth=root_pw,
        >>>                           ex_description='test2 node',
        >>>                           ex_network_domain=my_network_domain,
        >>>                           ex_primary_nic_vlan=vlan,
        >>>                           ex_is_started=False)
        >>>
        >>> # Option: Create node using private IPv4 instead of vlan
        >>> # node = driver.create_node(name='test_server_02', image=image,
        >>> #                           auth=root_pw,
        >>> #                           ex_description='test2 node',
        >>> #                           ex_network_domain=my_network_domain,
        >>> #                           ex_primary_nic_private_ipv4='10.1.1.7',
        >>> #                           ex_is_started=False)
        >>>
        >>> # Option: Create node using by specifying Network Adapter
        >>> # node = driver.create_node(name='test_server_03', image=image,
        >>> #                           auth=root_pw,
        >>> #                           ex_description='test2 node',
        >>> #                           ex_network_domain=my_network_domain,
        >>> #                           ex_primary_nic_vlan=vlan,
        >>> #                           ex_primary_nic_network_adapter='E1000',
        >>> #                           ex_is_started=False)
        >>>

        :keyword name: (required) String with a name for this new node
        :type name: ``str``

        :keyword image: (required) OS Image to boot on node.
        :type image: :class:`NodeImage` or ``str``

        :keyword auth: Initial authentication information for the
                       node. (If this is a customer LINUX
                       image auth will be ignored)
        :type auth: :class:`NodeAuthPassword` or ``str`` or ``None``

        :keyword ex_description: (optional) description for this node
        :type ex_description: ``str``

        :keyword ex_network_domain: (required) Network Domain or Network
                                    Domain ID to create the node
        :type ex_network_domain: :class:`DimensionDataNetworkDomain`
                                 or ``str``

        :keyword ex_primary_nic_private_ipv4: Provide private IPv4. Ignore
                                              if ex_primary_nic_vlan is
                                              provided. Use one or the
                                              other. Not both.
        :type ex_primary_nic_private_ipv4: ``str``

        :keyword ex_primary_nic_vlan: Provide VLAN for the node if
                                      ex_primary_nic_private_ipv4 NOT
                                      provided. One or the other. Not both.
        :type ex_primary_nic_vlan: :class:`DimensionDataVlan` or ``str``

        :keyword ex_primary_nic_network_adapter: (Optional) Default value
                                                 for the Operating System
                                                 will be used if leave
                                                 empty. Example: "E1000".
        :type ex_primary_nic_network_adapter: ``str``

        :keyword ex_additional_nics: (optional) List
                                     :class:'DimensionDataNic' or None
        :type ex_additional_nics: ``list`` of :class:'DimensionDataNic'
                                  or ``str``

        :keyword ex_memory_gb: (optional) The amount of memory in GB for
                               the server Can be used to override the
                               memory value inherited from the source
                               Server Image.
        :type ex_memory_gb: ``int``

        :keyword ex_cpu_specification: (optional) The spec of CPU to deploy
        :type ex_cpu_specification:
            :class:`DimensionDataServerCpuSpecification`

        :keyword ex_is_started: (required) Start server after creation.
                                Default is set to true.
        :type ex_is_started: ``bool``

        :keyword ex_primary_dns: (Optional) The node's primary DNS
        :type ex_primary_dns: ``str``

        :keyword ex_secondary_dns: (Optional) The node's secondary DNS
        :type ex_secondary_dns: ``str``

        :keyword ex_ipv4_gateway: (Optional) IPv4 address in dot-decimal
                                  notation, which will be used as the
                                  Primary NIC gateway instead of the default
                                  gateway assigned by the system. If
                                  ipv4Gateway is provided it does not have
                                  to be on the VLAN of the Primary NIC
                                  but MUST be reachable or the Guest OS
                                  will not be configured correctly.
        :type ex_ipv4_gateway: ``str``

        :keyword ex_disks: (optional) Dimensiondata disks. Optional disk
                           elements can be used to define the disk speed
                           that each disk on the Server; inherited from the
                           source Server Image will be deployed to. It is
                           not necessary to include a diskelement for every
                           disk; only those that you wish to set a disk
                           speed value for. Note that scsiId 7 cannot be
                           used.Up to 13 disks can be present in addition to
                           the required OS disk on SCSI ID 0. Refer to
                           https://docs.mcp-services.net/x/UwIu for disk
        :type ex_disks: List or tuple of :class:'DimensionDataServerDisk`

        :keyword ex_microsoft_time_zone: (optional) For use with
            Microsoft Windows source Server Images only. For the exact
            value to use please refer to the table of time zone
            indexes in the following Microsoft Technet
            documentation. If none is supplied, the default time
            zone for the data center geographic region will be used.
        :type ex_microsoft_time_zone: ``str``

        :return: The newly created :class:`Node`.
        :rtype: :class:`Node`
        """
        # Neither legacy MCP1 network nor MCP2 network domain provided
        if ex_network_domain is None and 'ex_network' not in kwargs:
            raise ValueError('You must provide either ex_network_domain '
                             'for MCP2 or ex_network for legacy MCP1')
        # Ambiguous parameter provided. Can't determine if it is MCP 1 or 2.
        if ex_network_domain is not None and 'ex_network' in kwargs:
            raise ValueError('You can only supply either '
                             'ex_network_domain '
                             'for MCP2 or ex_network for legacy MCP1')
        # Set ex_is_started to True by default if none bool data type provided
        if not isinstance(ex_is_started, bool):
            ex_is_started = True
        # Handle MCP1 legacy: presence of 'ex_network' selects the MCP1
        # deployment path, delegated entirely to _create_node_mcp1.
        if 'ex_network' in kwargs:
            new_node = self._create_node_mcp1(
                name=name, image=image, auth=auth,
                ex_network=kwargs.get("ex_network"),
                ex_description=ex_description,
                ex_memory_gb=ex_memory_gb,
                ex_cpu_specification=ex_cpu_specification,
                ex_is_started=ex_is_started,
                ex_primary_ipv4=ex_primary_nic_private_ipv4,
                ex_disks=ex_disks,
                ex_additional_nics_vlan=kwargs.get("ex_additional_nics_vlan"),
                ex_additional_nics_ipv4=kwargs.get("ex_additional_nics_ipv4"),
                ex_primary_dns=ex_primary_dns,
                ex_secondary_dns=ex_secondary_dns
            )
        else:
            # Handle MCP2 legacy. CaaS api 2.2 or earlier: translate the
            # old keyword names onto the current ex_primary_nic_* keywords.
            if 'ex_vlan' in kwargs:
                ex_primary_nic_vlan = kwargs.get('ex_vlan')
            if 'ex_primary_ipv4' in kwargs:
                ex_primary_nic_private_ipv4 = kwargs.get(
                    'ex_primary_ipv4')
            additional_nics = []
            if 'ex_additional_nics_vlan' in kwargs:
                vlans = kwargs.get('ex_additional_nics_vlan')
                if isinstance(vlans, (list, tuple)):
                    for v in vlans:
                        add_nic = DimensionDataNic(vlan=v)
                        additional_nics.append(add_nic)
                else:
                    raise TypeError("ex_additional_nics_vlan must "
                                    "be None or a tuple/list")
            if 'ex_additional_nics_ipv4' in kwargs:
                ips = kwargs.get('ex_additional_nics_ipv4')
                if isinstance(ips, (list, tuple)):
                    for ip in ips:
                        add_nic = DimensionDataNic(private_ip_v4=ip)
                        additional_nics.append(add_nic)
                else:
                    if ips is not None:
                        raise TypeError("ex_additional_nics_ipv4 must "
                                        "be None or a tuple/list")
            # Only override ex_additional_nics when a legacy keyword was
            # actually supplied, otherwise keep the caller's value.
            if ('ex_additional_nics_vlan' in kwargs or
                    'ex_additional_nics_ipv4' in kwargs):
                ex_additional_nics = additional_nics
            # Handle MCP2 latest. CaaS API 2.3 onwards
            if ex_network_domain is None:
                raise ValueError("ex_network_domain must be specified")
            password = None
            image_needs_auth = self._image_needs_auth(image)
            if image_needs_auth:
                # Accept either a plain string password or a NodeAuth object.
                if isinstance(auth, basestring):
                    auth_obj = NodeAuthPassword(password=auth)
                    password = auth
                else:
                    auth_obj = self._get_and_check_auth(auth)
                    password = auth_obj.password
            # Build the <deployServer> XML request body.
            server_elm = ET.Element('deployServer', {'xmlns': TYPES_URN})
            ET.SubElement(server_elm, "name").text = name
            ET.SubElement(server_elm, "description").text = ex_description
            image_id = self._image_to_image_id(image)
            ET.SubElement(server_elm, "imageId").text = image_id
            # API expects a lowercase 'true'/'false' string.
            ET.SubElement(server_elm, "start").text = str(
                ex_is_started).lower()
            if password is not None:
                ET.SubElement(server_elm,
                              "administratorPassword").text = password
            if ex_cpu_specification is not None:
                cpu = ET.SubElement(server_elm, "cpu")
                cpu.set('speed', ex_cpu_specification.performance)
                cpu.set('count', str(ex_cpu_specification.cpu_count))
                cpu.set('coresPerSocket',
                        str(ex_cpu_specification.cores_per_socket))
            if ex_memory_gb is not None:
                ET.SubElement(server_elm, "memoryGb").text = str(ex_memory_gb)
            # Exactly one of private IPv4 / VLAN must identify the primary NIC.
            if (ex_primary_nic_private_ipv4 is None and
                    ex_primary_nic_vlan is None):
                raise ValueError("Missing argument. Either "
                                 "ex_primary_nic_private_ipv4 or "
                                 "ex_primary_nic_vlan "
                                 "must be specified.")
            if (ex_primary_nic_private_ipv4 is not None and
                    ex_primary_nic_vlan is not None):
                raise ValueError("Either ex_primary_nic_private_ipv4 or "
                                 "ex_primary_nic_vlan "
                                 "be specified. Not both.")
            network_elm = ET.SubElement(server_elm, "networkInfo")
            net_domain_id = self._network_domain_to_network_domain_id(
                ex_network_domain)
            network_elm.set('networkDomainId', net_domain_id)
            pri_nic = ET.SubElement(network_elm, 'primaryNic')
            if ex_primary_nic_private_ipv4 is not None:
                ET.SubElement(pri_nic,
                              'privateIpv4').text = ex_primary_nic_private_ipv4
            if ex_primary_nic_vlan is not None:
                vlan_id = self._vlan_to_vlan_id(ex_primary_nic_vlan)
                ET.SubElement(pri_nic, 'vlanId').text = vlan_id
            if ex_primary_nic_network_adapter is not None:
                ET.SubElement(pri_nic,
                              "networkAdapter").text = \
                    ex_primary_nic_network_adapter
            # Each additional NIC must likewise carry exactly one of
            # private_ip_v4 / vlan.
            if isinstance(ex_additional_nics, (list, tuple)):
                for nic in ex_additional_nics:
                    additional_nic = ET.SubElement(network_elm,
                                                   'additionalNic')
                    if (nic.private_ip_v4 is None and
                            nic.vlan is None):
                        raise ValueError("Either a vlan or private_ip_v4 "
                                         "must be specified for each "
                                         "additional nic.")
                    if (nic.private_ip_v4 is not None and
                            nic.vlan is not None):
                        raise ValueError("Either a vlan or private_ip_v4 "
                                         "must be specified for each "
                                         "additional nic. Not both.")
                    if nic.private_ip_v4 is not None:
                        ET.SubElement(additional_nic,
                                      'privateIpv4').text = nic.private_ip_v4
                    if nic.vlan is not None:
                        vlan_id = self._vlan_to_vlan_id(nic.vlan)
                        ET.SubElement(additional_nic, 'vlanId').text = vlan_id
                    if nic.network_adapter_name is not None:
                        ET.SubElement(additional_nic,
                                      "networkAdapter").text = \
                            nic.network_adapter_name
            elif ex_additional_nics is not None:
                raise TypeError(
                    "ex_additional_NICs must be None or tuple/list")
            if ex_primary_dns:
                dns_elm = ET.SubElement(server_elm, "primaryDns")
                dns_elm.text = ex_primary_dns
            if ex_secondary_dns:
                dns_elm = ET.SubElement(server_elm, "secondaryDns")
                dns_elm.text = ex_secondary_dns
            if ex_ipv4_gateway:
                ET.SubElement(server_elm, "ipv4Gateway").text = ex_ipv4_gateway
            if isinstance(ex_disks, (list, tuple)):
                for disk in ex_disks:
                    disk_elm = ET.SubElement(server_elm, 'disk')
                    disk_elm.set('scsiId', disk.scsi_id)
                    disk_elm.set('speed', disk.speed)
            elif ex_disks is not None:
                raise TypeError("ex_disks must be None or tuple/list")
            if ex_microsoft_time_zone:
                ET.SubElement(server_elm,
                              "microsoftTimeZone").text = \
                    ex_microsoft_time_zone
            response = self.connection.request_with_orgId_api_2(
                'server/deployServer',
                method='POST',
                data=ET.tostring(server_elm)).object
            # The deploy response carries the new server's id in an
            # <info name="serverId" value="..."/> element.
            node_id = None
            for info in findall(response, 'info', TYPES_URN):
                if info.get('name') == 'serverId':
                    node_id = info.get('value')
            new_node = self.ex_get_node_by_id(node_id)
            # Expose an auto-generated password on the returned node so the
            # caller can retrieve it (only set when we generated one).
            if image_needs_auth:
                if getattr(auth_obj, "generated", False):
                    new_node.extra['password'] = auth_obj.password
        return new_node
def destroy_node(self, node):
"""
Deletes a node, node must be stopped before deletion
:keyword node: The node to delete
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('deleteServer',
{'xmlns': TYPES_URN, 'id': node.id})
body = self.connection.request_with_orgId_api_2(
'server/deleteServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def reboot_node(self, node):
"""
Reboots a node by requesting the OS restart via the hypervisor
:keyword node: The node to reboot
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('rebootServer',
{'xmlns': TYPES_URN, 'id': node.id})
body = self.connection.request_with_orgId_api_2(
'server/rebootServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def list_nodes(self, ex_location=None, ex_name=None,
ex_ipv6=None, ex_ipv4=None, ex_vlan=None,
ex_image=None, ex_deployed=None,
ex_started=None, ex_state=None,
ex_network=None, ex_network_domain=None):
"""
List nodes deployed for your organization.
:keyword ex_location: Filters the node list to nodes that are
located in this location
:type ex_location: :class:`NodeLocation` or ``str``
:keyword ex_name: Filters the node list to nodes that have this name
:type ex_name ``str``
:keyword ex_ipv6: Filters the node list to nodes that have this
ipv6 address
:type ex_ipv6: ``str``
:keyword ex_ipv4: Filters the node list to nodes that have this
ipv4 address
:type ex_ipv4: ``str``
:keyword ex_vlan: Filters the node list to nodes that are in this VLAN
:type ex_vlan: :class:`DimensionDataVlan` or ``str``
:keyword ex_image: Filters the node list to nodes that have this image
:type ex_image: :class:`NodeImage` or ``str``
:keyword ex_deployed: Filters the node list to nodes that are
deployed or not
:type ex_deployed: ``bool``
:keyword ex_started: Filters the node list to nodes that are
started or not
:type ex_started: ``bool``
:keyword ex_state: Filters the node list by nodes that are in
this state
:type ex_state: ``str``
:keyword ex_network: Filters the node list to nodes in this network
:type ex_network: :class:`DimensionDataNetwork` or ``str``
:keyword ex_network_domain: Filters the node list to nodes in this
network domain
:type ex_network_domain: :class:`DimensionDataNetworkDomain`
or ``str``
:return: a list of `Node` objects
:rtype: ``list`` of :class:`Node`
"""
node_list = []
for nodes in self.ex_list_nodes_paginated(
location=ex_location,
name=ex_name, ipv6=ex_ipv6,
ipv4=ex_ipv4, vlan=ex_vlan,
image=ex_image, deployed=ex_deployed,
started=ex_started, state=ex_state,
network=ex_network,
network_domain=ex_network_domain):
node_list.extend(nodes)
return node_list
def list_images(self, location=None):
"""
List images available
Note: Currently only returns the default 'base OS images'
provided by DimensionData. Customer images (snapshots)
use ex_list_customer_images
:keyword ex_location: Filters the node list to nodes that are
located in this location
:type ex_location: :class:`NodeLocation` or ``str``
:return: List of images available
:rtype: ``list`` of :class:`NodeImage`
"""
params = {}
if location is not None:
params['datacenterId'] = self._location_to_location_id(location)
return self._to_images(
self.connection.request_with_orgId_api_2(
'image/osImage',
params=params)
.object)
def list_sizes(self, location=None):
"""
return a list of available sizes
Currently, the size of the node is dictated by the chosen OS base
image, they cannot be set explicitly.
@inherits: :class:`NodeDriver.list_sizes`
"""
return [
NodeSize(id=1,
name="default",
ram=0,
disk=0,
bandwidth=0,
price=0,
driver=self.connection.driver),
]
def list_locations(self, ex_id=None):
"""
List locations (datacenters) available for instantiating servers and
networks.
:keyword ex_id: Filters the location list to this id
:type ex_id: ``str``
:return: List of locations
:rtype: ``list`` of :class:`NodeLocation`
"""
params = {}
if ex_id is not None:
params['id'] = ex_id
return self._to_locations(
self.connection
.request_with_orgId_api_2(
'infrastructure/datacenter',
params=params
).object
)
def list_networks(self, location=None):
"""
List networks deployed across all data center locations for your
organization. The response includes the location of each network.
:keyword location: The location
:type location: :class:`NodeLocation` or ``str``
:return: a list of DimensionDataNetwork objects
:rtype: ``list`` of :class:`DimensionDataNetwork`
"""
url_ext = ''
if location is not None:
url_ext = '/' + self._location_to_location_id(location)
return self._to_networks(
self.connection
.request_with_orgId_api_1('networkWithLocation%s' % url_ext)
.object)
def import_image(self, ovf_package_name, name,
cluster_id=None, datacenter_id=None, description=None,
is_guest_os_customization=None,
tagkey_name_value_dictionaries=None):
"""
Import image
:param ovf_package_name: Image OVF package name
:type ovf_package_name: ``str``
:param name: Image name
:type name: ``str``
:param cluster_id: Provide either cluster_id or datacenter_id
:type cluster_id: ``str``
:param datacenter_id: Provide either cluster_id or datacenter_id
:type datacenter_id: ``str``
:param description: Optional. Description of image
:type description: ``str``
:param is_guest_os_customization: Optional. true for NGOC image
:type is_guest_os_customization: ``bool``
:param tagkey_name_value_dictionaries: Optional tagkey name value dict
:type tagkey_name_value_dictionaries: dictionaries
:return: Return true if successful
:rtype: ``bool``
"""
# Unsupported for version lower than 2.4
if LooseVersion(self.connection.active_api_version) < LooseVersion(
'2.4'):
raise Exception("import image is feature is NOT supported in "
"api version earlier than 2.4")
elif cluster_id is None and datacenter_id is None:
raise ValueError("Either cluster_id or datacenter_id must be "
"provided")
elif cluster_id is not None and datacenter_id is not None:
raise ValueError("Cannot accept both cluster_id and "
"datacenter_id. Please provide either one")
else:
import_image_elem = ET.Element(
'urn:importImage',
{
'xmlns:urn': TYPES_URN,
})
ET.SubElement(
import_image_elem,
'urn:ovfPackage'
).text = ovf_package_name
ET.SubElement(
import_image_elem,
'urn:name'
).text = name
if description is not None:
ET.SubElement(
import_image_elem,
'urn:description'
).text = description
if cluster_id is not None:
ET.SubElement(
import_image_elem,
'urn:clusterId'
).text = cluster_id
else:
ET.SubElement(
import_image_elem,
'urn:datacenterId'
).text = datacenter_id
if is_guest_os_customization is not None:
ET.SubElement(
import_image_elem,
'urn:guestOsCustomization'
).text = is_guest_os_customization
if len(tagkey_name_value_dictionaries) > 0:
for k, v in tagkey_name_value_dictionaries.items():
tag_elem = ET.SubElement(
import_image_elem,
'urn:tag')
ET.SubElement(tag_elem,
'urn:tagKeyName').text = k
if v is not None:
ET.SubElement(tag_elem,
'urn:value').text = v
response = self.connection.request_with_orgId_api_2(
'image/importImage',
method='POST',
data=ET.tostring(import_image_elem)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_nodes_paginated(self, name=None, location=None,
ipv6=None, ipv4=None, vlan=None,
image=None, deployed=None, started=None,
state=None, network=None, network_domain=None):
"""
Return a generator which yields node lists in pages
:keyword location: Filters the node list to nodes that are
located in this location
:type location: :class:`NodeLocation` or ``str``
:keyword name: Filters the node list to nodes that have this name
:type name ``str``
:keyword ipv6: Filters the node list to nodes that have this
ipv6 address
:type ipv6: ``str``
:keyword ipv4: Filters the node list to nodes that have this
ipv4 address
:type ipv4: ``str``
:keyword vlan: Filters the node list to nodes that are in this VLAN
:type vlan: :class:`DimensionDataVlan` or ``str``
:keyword image: Filters the node list to nodes that have this image
:type image: :class:`NodeImage` or ``str``
:keyword deployed: Filters the node list to nodes that are
deployed or not
:type deployed: ``bool``
:keyword started: Filters the node list to nodes that are
started or not
:type started: ``bool``
:keyword state: Filters the node list to nodes that are in
this state
:type state: ``str``
:keyword network: Filters the node list to nodes in this network
:type network: :class:`DimensionDataNetwork` or ``str``
:keyword network_domain: Filters the node list to nodes in this
network domain
:type network_domain: :class:`DimensionDataNetworkDomain`
or ``str``
:return: a list of `Node` objects
:rtype: ``generator`` of `list` of :class:`Node`
"""
params = {}
if location is not None:
params['datacenterId'] = self._location_to_location_id(location)
if ipv6 is not None:
params['ipv6'] = ipv6
if ipv4 is not None:
params['privateIpv4'] = ipv4
if state is not None:
params['state'] = state
if started is not None:
params['started'] = started
if deployed is not None:
params['deployed'] = deployed
if name is not None:
params['name'] = name
if network_domain is not None:
params['networkDomainId'] = \
self._network_domain_to_network_domain_id(network_domain)
if network is not None:
params['networkId'] = self._network_to_network_id(network)
if vlan is not None:
params['vlanId'] = self._vlan_to_vlan_id(vlan)
if image is not None:
params['sourceImageId'] = self._image_to_image_id(image)
nodes_obj = self._list_nodes_single_page(params)
yield self._to_nodes(nodes_obj)
while nodes_obj.get('pageCount') >= nodes_obj.get('pageSize'):
params['pageNumber'] = int(nodes_obj.get('pageNumber')) + 1
nodes_obj = self._list_nodes_single_page(params)
yield self._to_nodes(nodes_obj)
def ex_start_node(self, node):
"""
Powers on an existing deployed server
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('startServer',
{'xmlns': TYPES_URN, 'id': node.id})
body = self.connection.request_with_orgId_api_2(
'server/startServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_shutdown_graceful(self, node):
"""
This function will attempt to "gracefully" stop a server by
initiating a shutdown sequence within the guest operating system.
A successful response on this function means the system has
successfully passed the request into the operating system.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('shutdownServer',
{'xmlns': TYPES_URN, 'id': node.id})
body = self.connection.request_with_orgId_api_2(
'server/shutdownServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_power_off(self, node):
"""
This function will abruptly power-off a server. Unlike
ex_shutdown_graceful, success ensures the node will stop but some OS
and application configurations may be adversely affected by the
equivalent of pulling the power plug out of the machine.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('powerOffServer',
{'xmlns': TYPES_URN, 'id': node.id})
try:
body = self.connection.request_with_orgId_api_2(
'server/powerOffServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
except (DimensionDataAPIException, NameError, BaseHTTPError):
r = self.ex_get_node_by_id(node.id)
response_code = r.state.upper()
return response_code in ['IN_PROGRESS', 'OK', 'STOPPED', 'STOPPING']
def ex_reset(self, node):
"""
This function will abruptly reset a server. Unlike
reboot_node, success ensures the node will restart but some OS
and application configurations may be adversely affected by the
equivalent of pulling the power plug out of the machine.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('resetServer',
{'xmlns': TYPES_URN, 'id': node.id})
body = self.connection.request_with_orgId_api_2(
'server/resetServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_update_vm_tools(self, node):
"""
This function triggers an update of the VMware Tools
software running on the guest OS of a Server.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
request_elm = ET.Element('updateVmwareTools',
{'xmlns': TYPES_URN, 'id': node.id})
body = self.connection.request_with_orgId_api_2(
'server/updateVmwareTools',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_update_node(self, node, name=None, description=None,
cpu_count=None, ram_mb=None):
"""
Update the node, the name, CPU or RAM
:param node: Node which should be used
:type node: :class:`Node`
:param name: The new name (optional)
:type name: ``str``
:param description: The new description (optional)
:type description: ``str``
:param cpu_count: The new CPU count (optional)
:type cpu_count: ``int``
:param ram_mb: The new Memory in MB (optional)
:type ram_mb: ``int``
:rtype: ``bool``
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if cpu_count is not None:
data['cpuCount'] = str(cpu_count)
if ram_mb is not None:
data['memory'] = str(ram_mb)
body = self.connection.request_with_orgId_api_1(
'server/%s' % (node.id),
method='POST',
data=urlencode(data, True)).object
response_code = findtext(body, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_create_anti_affinity_rule(self, node_list):
"""
Create an anti affinity rule given a list of nodes
Anti affinity rules ensure that servers will not reside
on the same VMware ESX host
:param node_list: The list of nodes to create a rule for
:type node_list: ``list`` of :class:`Node` or
``list`` of ``str``
:rtype: ``bool``
"""
if not isinstance(node_list, (list, tuple)):
raise TypeError("Node list must be a list or a tuple.")
anti_affinity_xml_request = ET.Element('NewAntiAffinityRule',
{'xmlns': SERVER_NS})
for node in node_list:
ET.SubElement(anti_affinity_xml_request, 'serverId').text = \
self._node_to_node_id(node)
result = self.connection.request_with_orgId_api_1(
'antiAffinityRule',
method='POST',
data=ET.tostring(anti_affinity_xml_request)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_delete_anti_affinity_rule(self, anti_affinity_rule):
"""
Remove anti affinity rule
:param anti_affinity_rule: The anti affinity rule to delete
:type anti_affinity_rule: :class:`DimensionDataAntiAffinityRule` or
``str``
:rtype: ``bool``
"""
rule_id = self._anti_affinity_rule_to_anti_affinity_rule_id(
anti_affinity_rule)
result = self.connection.request_with_orgId_api_1(
'antiAffinityRule/%s?delete' % (rule_id),
method='GET').object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_list_anti_affinity_rules(self, network=None, network_domain=None,
node=None, filter_id=None,
filter_state=None):
"""
List anti affinity rules for a network, network domain, or node
:param network: The network to list anti affinity rules for
One of network, network_domain, or node is required
:type network: :class:`DimensionDataNetwork` or ``str``
:param network_domain: The network domain to list anti affinity rules
One of network, network_domain,
or node is required
:type network_domain: :class:`DimensionDataNetworkDomain` or ``str``
:param node: The node to list anti affinity rules for
One of network, netwok_domain, or node is required
:type node: :class:`Node` or ``str``
:param filter_id: This will allow you to filter the rules
by this node id
:type filter_id: ``str``
:type filter_state: This will allow you to filter rules by
node state (i.e. NORMAL)
:type filter_state: ``str``
:rtype: ``list`` of :class:`DimensionDataAntiAffinityRule`
"""
not_none_arguments = [key
for key in (network, network_domain, node)
if key is not None]
if len(not_none_arguments) != 1:
raise ValueError("One and ONLY one of network, "
"network_domain, or node must be set")
params = {}
if network_domain is not None:
params['networkDomainId'] = \
self._network_domain_to_network_domain_id(network_domain)
if network is not None:
params['networkId'] = \
self._network_to_network_id(network)
if node is not None:
params['serverId'] = \
self._node_to_node_id(node)
if filter_id is not None:
params['id'] = filter_id
if filter_state is not None:
params['state'] = filter_state
paged_result = self.connection.paginated_request_with_orgId_api_2(
'server/antiAffinityRule',
method='GET',
params=params
)
rules = []
for result in paged_result:
rules.extend(self._to_anti_affinity_rules(result))
return rules
def ex_attach_node_to_vlan(self, node, vlan=None, private_ipv4=None):
"""
Attach a node to a VLAN by adding an additional NIC to
the node on the target VLAN. The IP will be automatically
assigned based on the VLAN IP network space. Alternatively, provide
a private IPv4 address instead of VLAN information, and this will
be assigned to the node on corresponding NIC.
:param node: Node which should be used
:type node: :class:`Node`
:param vlan: VLAN to attach the node to
(required unless private_ipv4)
:type vlan: :class:`DimensionDataVlan`
:keyword private_ipv4: Private nic IPv4 Address
(required unless vlan)
:type private_ipv4: ``str``
:rtype: ``bool``
"""
request = ET.Element('addNic',
{'xmlns': TYPES_URN})
ET.SubElement(request, 'serverId').text = node.id
nic = ET.SubElement(request, 'nic')
if vlan is not None:
ET.SubElement(nic, 'vlanId').text = vlan.id
elif private_ipv4 is not None:
ET.SubElement(nic, 'privateIpv4').text = private_ipv4
else:
raise ValueError("One of vlan or primary_ipv4 "
"must be specified")
response = self.connection.request_with_orgId_api_2(
'server/addNic',
method='POST',
data=ET.tostring(request)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_destroy_nic(self, nic_id):
"""
Remove a NIC on a node, removing the node from a VLAN
:param nic_id: The identifier of the NIC to remove
:type nic_id: ``str``
:rtype: ``bool``
"""
request = ET.Element('removeNic',
{'xmlns': TYPES_URN,
'id': nic_id})
response = self.connection.request_with_orgId_api_2(
'server/removeNic',
method='POST',
data=ET.tostring(request)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_networks(self, location=None):
"""
List networks deployed across all data center locations for your
organization. The response includes the location of each network.
:param location: The target location
:type location: :class:`NodeLocation` or ``str``
:return: a list of DimensionDataNetwork objects
:rtype: ``list`` of :class:`DimensionDataNetwork`
"""
return self.list_networks(location=location)
def ex_create_network(self, location, name, description=None):
"""
Create a new network in an MCP 1.0 location
:param location: The target location (MCP1)
:type location: :class:`NodeLocation` or ``str``
:param name: The name of the network
:type name: ``str``
:param description: Additional description of the network
:type description: ``str``
:return: A new instance of `DimensionDataNetwork`
:rtype: Instance of :class:`DimensionDataNetwork`
"""
network_location = self._location_to_location_id(location)
create_node = ET.Element('NewNetworkWithLocation',
{'xmlns': NETWORK_NS})
ET.SubElement(create_node, "name").text = name
if description is not None:
ET.SubElement(create_node, "description").text = description
ET.SubElement(create_node, "location").text = network_location
self.connection.request_with_orgId_api_1(
'networkWithLocation',
method='POST',
data=ET.tostring(create_node))
# MCP1 API does not return the ID, but name is unique for location
network = list(
filter(lambda x: x.name == name,
self.ex_list_networks(location)))[0]
return network
def ex_delete_network(self, network):
"""
Delete a network from an MCP 1 data center
:param network: The network to delete
:type network: :class:`DimensionDataNetwork`
:rtype: ``bool``
"""
response = self.connection.request_with_orgId_api_1(
'network/%s?delete' % network.id,
method='GET').object
response_code = findtext(response, 'result', GENERAL_NS)
return response_code == "SUCCESS"
def ex_rename_network(self, network, new_name):
"""
Rename a network in MCP 1 data center
:param network: The network to rename
:type network: :class:`DimensionDataNetwork`
:param new_name: The new name of the network
:type new_name: ``str``
:rtype: ``bool``
"""
response = self.connection.request_with_orgId_api_1(
'network/%s' % network.id,
method='POST',
data='name=%s' % new_name).object
response_code = findtext(response, 'result', GENERAL_NS)
return response_code == "SUCCESS"
def ex_get_network_domain(self, network_domain_id):
"""
Get an individual Network Domain, by identifier
:param network_domain_id: The identifier of the network domain
:type network_domain_id: ``str``
:rtype: :class:`DimensionDataNetworkDomain`
"""
locations = self.list_locations()
net = self.connection.request_with_orgId_api_2(
'network/networkDomain/%s' % network_domain_id).object
return self._to_network_domain(net, locations)
def ex_list_network_domains(self, location=None, name=None,
service_plan=None, state=None):
"""
List networks domains deployed across all data center locations domain.
for your organization.
The response includes the location of each network
:param location: Only network domains in the location (optional)
:type location: :class:`NodeLocation` or ``str``
:param name: Only network domains of this name (optional)
:type name: ``str``
:param service_plan: Only network domains of this type (optional)
:type service_plan: ``str``
:param state: Only network domains in this state (optional)
:type state: ``str``
:return: a list of `DimensionDataNetwork` objects
:rtype: ``list`` of :class:`DimensionDataNetwork`
"""
params = {}
if location is not None:
params['datacenterId'] = self._location_to_location_id(location)
if name is not None:
params['name'] = name
if service_plan is not None:
params['type'] = service_plan
if state is not None:
params['state'] = state
response = self.connection \
.request_with_orgId_api_2('network/networkDomain',
params=params).object
return self._to_network_domains(response)
def ex_create_network_domain(self, location, name, service_plan,
description=None):
"""
Deploy a new network domain to a data center
:param location: The data center to list
:type location: :class:`NodeLocation` or ``str``
:param name: The name of the network domain to create
:type name: ``str``
:param service_plan: The service plan, either "ESSENTIALS"
or "ADVANCED"
:type service_plan: ``str``
:param description: An additional description of
the network domain
:type description: ``str``
:return: an instance of `DimensionDataNetworkDomain`
:rtype: :class:`DimensionDataNetworkDomain`
"""
create_node = ET.Element('deployNetworkDomain', {'xmlns': TYPES_URN})
ET.SubElement(
create_node,
"datacenterId"
).text = self._location_to_location_id(location)
ET.SubElement(create_node, "name").text = name
if description is not None:
ET.SubElement(create_node, "description").text = description
ET.SubElement(create_node, "type").text = service_plan
response = self.connection.request_with_orgId_api_2(
'network/deployNetworkDomain',
method='POST',
data=ET.tostring(create_node)).object
network_domain_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'networkDomainId':
network_domain_id = info.get('value')
return DimensionDataNetworkDomain(
id=network_domain_id,
name=name,
description=description,
location=location,
status=NodeState.RUNNING,
plan=service_plan
)
def ex_update_network_domain(self, network_domain):
"""
Update the properties of a network domain
:param network_domain: The network domain with updated properties
:type network_domain: :class:`DimensionDataNetworkDomain`
:return: an instance of `DimensionDataNetworkDomain`
:rtype: :class:`DimensionDataNetworkDomain`
"""
edit_node = ET.Element('editNetworkDomain', {'xmlns': TYPES_URN})
edit_node.set('id', network_domain.id)
ET.SubElement(edit_node, "name").text = network_domain.name
if network_domain.description is not None:
ET.SubElement(edit_node, "description").text \
= network_domain.description
ET.SubElement(edit_node, "type").text = network_domain.plan
self.connection.request_with_orgId_api_2(
'network/editNetworkDomain',
method='POST',
data=ET.tostring(edit_node)).object
return network_domain
def ex_delete_network_domain(self, network_domain):
"""
Delete a network domain
:param network_domain: The network domain to delete
:type network_domain: :class:`DimensionDataNetworkDomain`
:rtype: ``bool``
"""
delete_node = ET.Element('deleteNetworkDomain', {'xmlns': TYPES_URN})
delete_node.set('id', network_domain.id)
result = self.connection.request_with_orgId_api_2(
'network/deleteNetworkDomain',
method='POST',
data=ET.tostring(delete_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_create_vlan(self,
network_domain,
name,
private_ipv4_base_address,
description=None,
private_ipv4_prefix_size=24):
"""
Deploy a new VLAN to a network domain
:param network_domain: The network domain to add the VLAN to
:type network_domain: :class:`DimensionDataNetworkDomain`
:param name: The name of the VLAN to create
:type name: ``str``
:param private_ipv4_base_address: The base IPv4 address
e.g. 192.168.1.0
:type private_ipv4_base_address: ``str``
:param description: An additional description of the VLAN
:type description: ``str``
:param private_ipv4_prefix_size: The size of the IPv4
address space, e.g 24
:type private_ipv4_prefix_size: ``int``
:return: an instance of `DimensionDataVlan`
:rtype: :class:`DimensionDataVlan`
"""
create_node = ET.Element('deployVlan', {'xmlns': TYPES_URN})
ET.SubElement(create_node, "networkDomainId").text = network_domain.id
ET.SubElement(create_node, "name").text = name
if description is not None:
ET.SubElement(create_node, "description").text = description
ET.SubElement(create_node, "privateIpv4BaseAddress").text = \
private_ipv4_base_address
ET.SubElement(create_node, "privateIpv4PrefixSize").text = \
str(private_ipv4_prefix_size)
response = self.connection.request_with_orgId_api_2(
'network/deployVlan',
method='POST',
data=ET.tostring(create_node)).object
vlan_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'vlanId':
vlan_id = info.get('value')
return self.ex_get_vlan(vlan_id)
def ex_get_vlan(self, vlan_id):
"""
Get a single VLAN, by it's identifier
:param vlan_id: The identifier of the VLAN
:type vlan_id: ``str``
:return: an instance of `DimensionDataVlan`
:rtype: :class:`DimensionDataVlan`
"""
locations = self.list_locations()
vlan = self.connection.request_with_orgId_api_2(
'network/vlan/%s' % vlan_id).object
return self._to_vlan(vlan, locations)
def ex_update_vlan(self, vlan):
"""
Updates the properties of the given VLAN
Only name and description are updated
:param vlan: The VLAN to update
:type vlan: :class:`DimensionDataNetworkDomain`
:return: an instance of `DimensionDataVlan`
:rtype: :class:`DimensionDataVlan`
"""
edit_node = ET.Element('editVlan', {'xmlns': TYPES_URN})
edit_node.set('id', vlan.id)
ET.SubElement(edit_node, "name").text = vlan.name
if vlan.description is not None:
ET.SubElement(edit_node, "description").text \
= vlan.description
self.connection.request_with_orgId_api_2(
'network/editVlan',
method='POST',
data=ET.tostring(edit_node)).object
return vlan
def ex_expand_vlan(self, vlan):
"""
Expands the VLAN to the prefix size in private_ipv4_range_size
The expansion will
not be permitted if the proposed IP space overlaps with an
already deployed VLANs IP space.
:param vlan: The VLAN to update
:type vlan: :class:`DimensionDataNetworkDomain`
:return: an instance of `DimensionDataVlan`
:rtype: :class:`DimensionDataVlan`
"""
edit_node = ET.Element('expandVlan', {'xmlns': TYPES_URN})
edit_node.set('id', vlan.id)
ET.SubElement(edit_node, "privateIpv4PrefixSize").text =\
vlan.private_ipv4_range_size
self.connection.request_with_orgId_api_2(
'network/expandVlan',
method='POST',
data=ET.tostring(edit_node)).object
return vlan
def ex_delete_vlan(self, vlan):
"""
Deletes an existing VLAN
:param vlan: The VLAN to delete
:type vlan: :class:`DimensionDataNetworkDomain`
:rtype: ``bool``
"""
delete_node = ET.Element('deleteVlan', {'xmlns': TYPES_URN})
delete_node.set('id', vlan.id)
result = self.connection.request_with_orgId_api_2(
'network/deleteVlan',
method='POST',
data=ET.tostring(delete_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_vlans(self, location=None, network_domain=None, name=None,
ipv4_address=None, ipv6_address=None, state=None):
"""
List VLANs available, can filter by location and/or network domain
:param location: Only VLANs in this location (optional)
:type location: :class:`NodeLocation` or ``str``
:param network_domain: Only VLANs in this domain (optional)
:type network_domain: :class:`DimensionDataNetworkDomain`
:param name: Only VLANs with this name (optional)
:type name: ``str``
:param ipv4_address: Only VLANs with this ipv4 address (optional)
:type ipv4_address: ``str``
:param ipv6_address: Only VLANs with this ipv6 address (optional)
:type ipv6_address: ``str``
:param state: Only VLANs with this state (optional)
:type state: ``str``
:return: a list of DimensionDataVlan objects
:rtype: ``list`` of :class:`DimensionDataVlan`
"""
params = {}
if location is not None:
params['datacenterId'] = self._location_to_location_id(location)
if network_domain is not None:
params['networkDomainId'] = \
self._network_domain_to_network_domain_id(network_domain)
if name is not None:
params['name'] = name
if ipv4_address is not None:
params['privateIpv4Address'] = ipv4_address
if ipv6_address is not None:
params['ipv6Address'] = ipv6_address
if state is not None:
params['state'] = state
response = self.connection.request_with_orgId_api_2('network/vlan',
params=params) \
.object
return self._to_vlans(response)
def ex_add_public_ip_block_to_network_domain(self, network_domain):
add_node = ET.Element('addPublicIpBlock', {'xmlns': TYPES_URN})
ET.SubElement(add_node, "networkDomainId").text =\
network_domain.id
response = self.connection.request_with_orgId_api_2(
'network/addPublicIpBlock',
method='POST',
data=ET.tostring(add_node)).object
block_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'ipBlockId':
block_id = info.get('value')
return self.ex_get_public_ip_block(block_id)
def ex_list_public_ip_blocks(self, network_domain):
params = {}
params['networkDomainId'] = network_domain.id
response = self.connection \
.request_with_orgId_api_2('network/publicIpBlock',
params=params).object
return self._to_ip_blocks(response)
def ex_get_public_ip_block(self, block_id):
locations = self.list_locations()
block = self.connection.request_with_orgId_api_2(
'network/publicIpBlock/%s' % block_id).object
return self._to_ip_block(block, locations)
def ex_delete_public_ip_block(self, block):
delete_node = ET.Element('removePublicIpBlock', {'xmlns': TYPES_URN})
delete_node.set('id', block.id)
result = self.connection.request_with_orgId_api_2(
'network/removePublicIpBlock',
method='POST',
data=ET.tostring(delete_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_node_by_id(self, id):
node = self.connection.request_with_orgId_api_2(
'server/server/%s' % id).object
return self._to_node(node)
def ex_list_firewall_rules(self, network_domain, page_size=50,
page_number=1):
params = {'pageSize': page_size, 'pageNumber': page_number}
params['networkDomainId'] = self._network_domain_to_network_domain_id(
network_domain)
response = self.connection \
.request_with_orgId_api_2('network/firewallRule',
params=params).object
return self._to_firewall_rules(response, network_domain)
def ex_create_firewall_rule(self, network_domain, rule, position,
position_relative_to_rule=None):
"""
Creates a firewall rule
:param network_domain: The network domain in which to create
the firewall rule
:type network_domain: :class:`DimensionDataNetworkDomain` or ``str``
:param rule: The rule in which to create
:type rule: :class:`DimensionDataFirewallRule`
:param position: The position in which to create the rule
There are two types of positions
with position_relative_to_rule arg and without it
With: 'BEFORE' or 'AFTER'
Without: 'FIRST' or 'LAST'
:type position: ``str``
:param position_relative_to_rule: The rule or rule name in
which to decide positioning by
:type position_relative_to_rule:
:class:`DimensionDataFirewallRule` or ``str``
:rtype: ``bool``
"""
positions_without_rule = ('FIRST', 'LAST')
positions_with_rule = ('BEFORE', 'AFTER')
create_node = ET.Element('createFirewallRule', {'xmlns': TYPES_URN})
ET.SubElement(create_node, "networkDomainId").text = \
self._network_domain_to_network_domain_id(network_domain)
ET.SubElement(create_node, "name").text = rule.name
ET.SubElement(create_node, "action").text = rule.action
ET.SubElement(create_node, "ipVersion").text = rule.ip_version
ET.SubElement(create_node, "protocol").text = rule.protocol
# Setup source port rule
source = ET.SubElement(create_node, "source")
if rule.source.address_list_id is not None:
source_ip = ET.SubElement(source, 'ipAddressListId')
source_ip.text = rule.source.address_list_id
else:
source_ip = ET.SubElement(source, 'ip')
if rule.source.any_ip:
source_ip.set('address', 'ANY')
else:
source_ip.set('address', rule.source.ip_address)
if rule.source.ip_prefix_size is not None:
source_ip.set('prefixSize',
str(rule.source.ip_prefix_size))
if rule.source.port_list_id is not None:
source_port = ET.SubElement(source, 'portListId')
source_port.text = rule.source.port_list_id
else:
if rule.source.port_begin is not None:
source_port = ET.SubElement(source, 'port')
source_port.set('begin', rule.source.port_begin)
if rule.source.port_end is not None:
source_port.set('end', rule.source.port_end)
# Setup destination port rule
dest = ET.SubElement(create_node, "destination")
if rule.destination.address_list_id is not None:
dest_ip = ET.SubElement(dest, 'ipAddressListId')
dest_ip.text = rule.destination.address_list_id
else:
dest_ip = ET.SubElement(dest, 'ip')
if rule.destination.any_ip:
dest_ip.set('address', 'ANY')
else:
dest_ip.set('address', rule.destination.ip_address)
if rule.destination.ip_prefix_size is not None:
dest_ip.set('prefixSize', rule.destination.ip_prefix_size)
if rule.destination.port_list_id is not None:
dest_port = ET.SubElement(dest, 'portListId')
dest_port.text = rule.destination.port_list_id
else:
if rule.destination.port_begin is not None:
dest_port = ET.SubElement(dest, 'port')
dest_port.set('begin', rule.destination.port_begin)
if rule.destination.port_end is not None:
dest_port.set('end', rule.destination.port_end)
# Set up positioning of rule
ET.SubElement(create_node, "enabled").text = str(rule.enabled).lower()
placement = ET.SubElement(create_node, "placement")
if position_relative_to_rule is not None:
if position not in positions_with_rule:
raise ValueError("When position_relative_to_rule is specified"
" position must be %s"
% ', '.join(positions_with_rule))
if isinstance(position_relative_to_rule,
DimensionDataFirewallRule):
rule_name = position_relative_to_rule.name
else:
rule_name = position_relative_to_rule
placement.set('relativeToRule', rule_name)
else:
if position not in positions_without_rule:
raise ValueError("When position_relative_to_rule is not"
" specified position must be %s"
% ', '.join(positions_without_rule))
placement.set('position', position)
response = self.connection.request_with_orgId_api_2(
'network/createFirewallRule',
method='POST',
data=ET.tostring(create_node)).object
rule_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'firewallRuleId':
rule_id = info.get('value')
rule.id = rule_id
return rule
def ex_edit_firewall_rule(self, rule, position,
relative_rule_for_position=None):
"""
Edit a firewall rule
>>> from pprint import pprint
>>> from libcloud.compute.types import Provider
>>> from libcloud.compute.providers import get_driver
>>> import libcloud.security
>>>
>>> # Get dimension data driver
>>> libcloud.security.VERIFY_SSL_CERT = True
>>> cls = get_driver(Provider.DIMENSIONDATA)
>>> driver = cls('myusername','mypassword', region='dd-au')
>>>
>>> # Get location
>>> location = driver.ex_get_location_by_id(id='AU9')
>>>
>>> # Get network domain by location
>>> networkDomainName = "Baas QA"
>>> network_domains = driver.ex_list_network_domains(location=location)
>>> my_network_domain = [d for d in network_domains if d.name ==
networkDomainName][0]
>>>
>>>
>>> # List firewall rules
>>> firewall_rules = driver.ex_list_firewall_rules(my_network_domain)
>>>
>>> # Get Firewall Rule by name
>>> pprint("List specific firewall rule by name")
>>> fire_rule_under_test = (list(filter(lambda x: x.name ==
'My_New_Firewall_Rule', firewall_rules))[0])
>>> pprint(fire_rule_under_test.source)
>>> pprint(fire_rule_under_test.destination)
>>>
>>> # Edit Firewall
>>> fire_rule_under_test.destination.address_list_id =
'5e7c323f-c885-4e4b-9a27-94c44217dbd3'
>>> fire_rule_under_test.destination.port_list_id =
'b6557c5a-45fa-4138-89bd-8fe68392691b'
>>> result = driver.ex_edit_firewall_rule(fire_rule_under_test, 'LAST')
>>> pprint(result)
:param rule: (required) The rule in which to create
:type rule: :class:`DimensionDataFirewallRule`
:param position: (required) There are two types of positions
with position_relative_to_rule arg and without it
With: 'BEFORE' or 'AFTER'
Without: 'FIRST' or 'LAST'
:type position: ``str``
:param relative_rule_for_position: (optional) The rule or rule name in
which to decide the relative rule
for positioning.
:type relative_rule_for_position:
:class:`DimensionDataFirewallRule` or ``str``
:rtype: ``bool``
"""
positions_without_rule = ('FIRST', 'LAST')
positions_with_rule = ('BEFORE', 'AFTER')
edit_node = ET.Element('editFirewallRule',
{'xmlns': TYPES_URN, 'id': rule.id})
ET.SubElement(edit_node, "action").text = rule.action
ET.SubElement(edit_node, "protocol").text = rule.protocol
# Source address
source = ET.SubElement(edit_node, "source")
if rule.source.address_list_id is not None:
source_ip = ET.SubElement(source, 'ipAddressListId')
source_ip.text = rule.source.address_list_id
else:
source_ip = ET.SubElement(source, 'ip')
if rule.source.any_ip:
source_ip.set('address', 'ANY')
else:
source_ip.set('address', rule.source.ip_address)
if rule.source.ip_prefix_size is not None:
source_ip.set('prefixSize',
str(rule.source.ip_prefix_size))
# Setup source port rule
if rule.source.port_list_id is not None:
source_port = ET.SubElement(source, 'portListId')
source_port.text = rule.source.port_list_id
else:
if rule.source.port_begin is not None:
source_port = ET.SubElement(source, 'port')
source_port.set('begin', rule.source.port_begin)
if rule.source.port_end is not None:
source_port.set('end', rule.source.port_end)
# Setup destination port rule
dest = ET.SubElement(edit_node, "destination")
if rule.destination.address_list_id is not None:
dest_ip = ET.SubElement(dest, 'ipAddressListId')
dest_ip.text = rule.destination.address_list_id
else:
dest_ip = ET.SubElement(dest, 'ip')
if rule.destination.any_ip:
dest_ip.set('address', 'ANY')
else:
dest_ip.set('address', rule.destination.ip_address)
if rule.destination.ip_prefix_size is not None:
dest_ip.set('prefixSize', rule.destination.ip_prefix_size)
if rule.destination.port_list_id is not None:
dest_port = ET.SubElement(dest, 'portListId')
dest_port.text = rule.destination.port_list_id
else:
if rule.destination.port_begin is not None:
dest_port = ET.SubElement(dest, 'port')
dest_port.set('begin', rule.destination.port_begin)
if rule.destination.port_end is not None:
dest_port.set('end', rule.destination.port_end)
# Set up positioning of rule
ET.SubElement(edit_node, "enabled").text = str(rule.enabled).lower()
placement = ET.SubElement(edit_node, "placement")
if relative_rule_for_position is not None:
if position not in positions_with_rule:
raise ValueError("When position_relative_to_rule is specified"
" position must be %s"
% ', '.join(positions_with_rule))
if isinstance(relative_rule_for_position,
DimensionDataFirewallRule):
rule_name = relative_rule_for_position.name
else:
rule_name = relative_rule_for_position
placement.set('relativeToRule', rule_name)
else:
if position not in positions_without_rule:
raise ValueError("When position_relative_to_rule is not"
" specified position must be %s"
% ', '.join(positions_without_rule))
placement.set('position', position)
response = self.connection.request_with_orgId_api_2(
'network/editFirewallRule',
method='POST',
data=ET.tostring(edit_node)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_firewall_rule(self, network_domain, rule_id):
locations = self.list_locations()
rule = self.connection.request_with_orgId_api_2(
'network/firewallRule/%s' % rule_id).object
return self._to_firewall_rule(rule, locations, network_domain)
def ex_set_firewall_rule_state(self, rule, state):
"""
Change the state (enabled or disabled) of a rule
:param rule: The rule to delete
:type rule: :class:`DimensionDataFirewallRule`
:param state: The desired state enabled (True) or disabled (False)
:type state: ``bool``
:rtype: ``bool``
"""
update_node = ET.Element('editFirewallRule', {'xmlns': TYPES_URN})
update_node.set('id', rule.id)
ET.SubElement(update_node, 'enabled').text = str(state).lower()
result = self.connection.request_with_orgId_api_2(
'network/editFirewallRule',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_delete_firewall_rule(self, rule):
"""
Delete a firewall rule
:param rule: The rule to delete
:type rule: :class:`DimensionDataFirewallRule`
:rtype: ``bool``
"""
update_node = ET.Element('deleteFirewallRule', {'xmlns': TYPES_URN})
update_node.set('id', rule.id)
result = self.connection.request_with_orgId_api_2(
'network/deleteFirewallRule',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_create_nat_rule(self, network_domain, internal_ip, external_ip):
"""
Create a NAT rule
:param network_domain: The network domain the rule belongs to
:type network_domain: :class:`DimensionDataNetworkDomain`
:param internal_ip: The IPv4 address internally
:type internal_ip: ``str``
:param external_ip: The IPv4 address externally
:type external_ip: ``str``
:rtype: :class:`DimensionDataNatRule`
"""
create_node = ET.Element('createNatRule', {'xmlns': TYPES_URN})
ET.SubElement(create_node, 'networkDomainId').text = network_domain.id
ET.SubElement(create_node, 'internalIp').text = internal_ip
ET.SubElement(create_node, 'externalIp').text = external_ip
result = self.connection.request_with_orgId_api_2(
'network/createNatRule',
method='POST',
data=ET.tostring(create_node)).object
rule_id = None
for info in findall(result, 'info', TYPES_URN):
if info.get('name') == 'natRuleId':
rule_id = info.get('value')
return DimensionDataNatRule(
id=rule_id,
network_domain=network_domain,
internal_ip=internal_ip,
external_ip=external_ip,
status=NodeState.RUNNING
)
def ex_list_nat_rules(self, network_domain):
"""
Get NAT rules for the network domain
:param network_domain: The network domain the rules belongs to
:type network_domain: :class:`DimensionDataNetworkDomain`
:rtype: ``list`` of :class:`DimensionDataNatRule`
"""
params = {}
params['networkDomainId'] = network_domain.id
response = self.connection \
.request_with_orgId_api_2('network/natRule',
params=params).object
return self._to_nat_rules(response, network_domain)
def ex_get_nat_rule(self, network_domain, rule_id):
"""
Get a NAT rule by ID
:param network_domain: The network domain the rule belongs to
:type network_domain: :class:`DimensionDataNetworkDomain`
:param rule_id: The ID of the NAT rule to fetch
:type rule_id: ``str``
:rtype: :class:`DimensionDataNatRule`
"""
rule = self.connection.request_with_orgId_api_2(
'network/natRule/%s' % rule_id).object
return self._to_nat_rule(rule, network_domain)
def ex_delete_nat_rule(self, rule):
"""
Delete an existing NAT rule
:param rule: The rule to delete
:type rule: :class:`DimensionDataNatRule`
:rtype: ``bool``
"""
update_node = ET.Element('deleteNatRule', {'xmlns': TYPES_URN})
update_node.set('id', rule.id)
result = self.connection.request_with_orgId_api_2(
'network/deleteNatRule',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_location_by_id(self, id):
"""
Get location by ID.
:param id: ID of the node location which should be used
:type id: ``str``
:rtype: :class:`NodeLocation`
"""
location = None
if id is not None:
location = self.list_locations(ex_id=id)[0]
return location
def ex_wait_for_state(self, state, func, poll_interval=2,
timeout=60, *args, **kwargs):
"""
Wait for the function which returns a instance
with field status to match
Keep polling func until one of the desired states is matched
:param state: Either the desired state (`str`) or a `list` of states
:type state: ``str`` or ``list``
:param func: The function to call, e.g. ex_get_vlan
:type func: ``function``
:param poll_interval: The number of seconds to wait between checks
:type poll_interval: `int`
:param timeout: The total number of seconds to wait to reach a state
:type timeout: `int`
:param args: The arguments for func
:type args: Positional arguments
:param kwargs: The arguments for func
:type kwargs: Keyword arguments
"""
return self.connection.wait_for_state(state, func, poll_interval,
timeout, *args, **kwargs)
def ex_enable_monitoring(self, node, service_plan="ESSENTIALS"):
"""
Enables cloud monitoring on a node
:param node: The node to monitor
:type node: :class:`Node`
:param service_plan: The service plan, one of ESSENTIALS or
ADVANCED
:type service_plan: ``str``
:rtype: ``bool``
"""
update_node = ET.Element('enableServerMonitoring',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
ET.SubElement(update_node, 'servicePlan').text = service_plan
result = self.connection.request_with_orgId_api_2(
'server/enableServerMonitoring',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_update_monitoring_plan(self, node, service_plan="ESSENTIALS"):
"""
Updates the service plan on a node with monitoring
:param node: The node to monitor
:type node: :class:`Node`
:param service_plan: The service plan, one of ESSENTIALS or
ADVANCED
:type service_plan: ``str``
:rtype: ``bool``
"""
update_node = ET.Element('changeServerMonitoringPlan',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
ET.SubElement(update_node, 'servicePlan').text = service_plan
result = self.connection.request_with_orgId_api_2(
'server/changeServerMonitoringPlan',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_disable_monitoring(self, node):
"""
Disables cloud monitoring for a node
:param node: The node to stop monitoring
:type node: :class:`Node`
:rtype: ``bool``
"""
update_node = ET.Element('disableServerMonitoring',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
result = self.connection.request_with_orgId_api_2(
'server/disableServerMonitoring',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_add_storage_to_node(self, node, amount,
speed='STANDARD', scsi_id=None):
"""
Add storage to the node
:param node: The server to add storage to
:type node: :class:`Node`
:param amount: The amount of storage to add, in GB
:type amount: ``int``
:param speed: The disk speed type
:type speed: ``str``
:param scsi_id: The target SCSI ID (optional)
:type scsi_id: ``int``
:rtype: ``bool``
"""
update_node = ET.Element('addDisk',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
ET.SubElement(update_node, 'sizeGb').text = str(amount)
ET.SubElement(update_node, 'speed').text = speed.upper()
if scsi_id is not None:
ET.SubElement(update_node, 'scsiId').text = str(scsi_id)
result = self.connection.request_with_orgId_api_2(
'server/addDisk',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_remove_storage_from_node(self, node, scsi_id):
"""
Remove storage from a node
:param node: The server to add storage to
:type node: :class:`Node`
:param scsi_id: The ID of the disk to remove
:type scsi_id: ``str``
:rtype: ``bool``
"""
disk = [disk for disk in node.extra['disks']
if disk.scsi_id == scsi_id][0]
return self.ex_remove_storage(disk.id)
def ex_remove_storage(self, disk_id):
"""
Remove storage from a node
:param node: The server to add storage to
:type node: :class:`Node`
:param disk_id: The ID of the disk to remove
:type disk_id: ``str``
:rtype: ``bool``
"""
remove_disk = ET.Element('removeDisk',
{'xmlns': TYPES_URN})
remove_disk.set('id', disk_id)
result = self.connection.request_with_orgId_api_2(
'server/removeDisk',
method='POST',
data=ET.tostring(remove_disk)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_change_storage_speed(self, node, disk_id, speed):
"""
Change the speed (disk tier) of a disk
:param node: The server to change the disk speed of
:type node: :class:`Node`
:param disk_id: The ID of the disk to change
:type disk_id: ``str``
:param speed: The disk speed type e.g. STANDARD
:type speed: ``str``
:rtype: ``bool``
"""
create_node = ET.Element('ChangeDiskSpeed', {'xmlns': SERVER_NS})
ET.SubElement(create_node, 'speed').text = speed
result = self.connection.request_with_orgId_api_1(
'server/%s/disk/%s/changeSpeed' %
(node.id, disk_id),
method='POST',
data=ET.tostring(create_node)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_change_storage_size(self, node, disk_id, size):
"""
Change the size of a disk
:param node: The server to change the disk of
:type node: :class:`Node`
:param disk_id: The ID of the disk to resize
:type disk_id: ``str``
:param size: The disk size in GB
:type size: ``int``
:rtype: ``bool``
"""
create_node = ET.Element('ChangeDiskSize', {'xmlns': SERVER_NS})
ET.SubElement(create_node, 'newSizeGb').text = str(size)
result = self.connection.request_with_orgId_api_1(
'server/%s/disk/%s/changeSize' %
(node.id, disk_id),
method='POST',
data=ET.tostring(create_node)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_reconfigure_node(self, node, memory_gb, cpu_count, cores_per_socket,
cpu_performance):
"""
Reconfigure the virtual hardware specification of a node
:param node: The server to change
:type node: :class:`Node`
:param memory_gb: The amount of memory in GB (optional)
:type memory_gb: ``int``
:param cpu_count: The number of CPU (optional)
:type cpu_count: ``int``
:param cores_per_socket: Number of CPU cores per socket (optional)
:type cores_per_socket: ``int``
:param cpu_performance: CPU Performance type (optional)
:type cpu_performance: ``str``
:rtype: ``bool``
"""
update = ET.Element('reconfigureServer', {'xmlns': TYPES_URN})
update.set('id', node.id)
if memory_gb is not None:
ET.SubElement(update, 'memoryGb').text = str(memory_gb)
if cpu_count is not None:
ET.SubElement(update, 'cpuCount').text = str(cpu_count)
if cpu_performance is not None:
ET.SubElement(update, 'cpuSpeed').text = cpu_performance
if cores_per_socket is not None:
ET.SubElement(update, 'coresPerSocket').text = \
str(cores_per_socket)
result = self.connection.request_with_orgId_api_2(
'server/reconfigureServer',
method='POST',
data=ET.tostring(update)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_clone_node_to_image(self, node, image_name,
image_description=None,
cluster_id=None, is_guest_Os_Customization=None,
tag_key_id=None, tag_value=None):
"""
Clone a server into a customer image.
:param node: The server to clone
:type node: :class:`Node`
:param image_name: The name of the clone image
:type image_name: ``str``
:param description: The description of the image
:type description: ``str``
:rtype: ``bool``
"""
if image_description is None:
image_description = ''
node_id = self._node_to_node_id(node)
# Version 2.3 and lower
if LooseVersion(self.connection.active_api_version) < LooseVersion(
'2.4'):
response = self.connection.request_with_orgId_api_1(
'server/%s?clone=%s&desc=%s' %
(node_id, image_name, image_description)).object
# Version 2.4 and higher
else:
clone_server_elem = ET.Element('cloneServer',
{'id': node_id,
'xmlns': TYPES_URN})
ET.SubElement(clone_server_elem, 'imageName').text = image_name
if image_description is not None:
ET.SubElement(clone_server_elem, 'description').text = \
image_description
if cluster_id is not None:
ET.SubElement(clone_server_elem, 'clusterId').text = \
cluster_id
if is_guest_Os_Customization is not None:
ET.SubElement(clone_server_elem, 'guestOsCustomization')\
.text = is_guest_Os_Customization
if tag_key_id is not None:
tag_elem = ET.SubElement(clone_server_elem, 'tagById')
ET.SubElement(tag_elem, 'tagKeyId').text = tag_key_id
if tag_value is not None:
ET.SubElement(tag_elem, 'value').text = tag_value
response = self.connection.request_with_orgId_api_2(
'server/cloneServer',
method='POST',
data=ET.tostring(clone_server_elem)).object
# Version 2.3 and lower
if LooseVersion(self.connection.active_api_version) < LooseVersion(
'2.4'):
response_code = findtext(response, 'result', GENERAL_NS)
else:
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_clean_failed_deployment(self, node):
"""
Removes a node that has failed to deploy
:param node: The failed node to clean
:type node: :class:`Node` or ``str``
"""
node_id = self._node_to_node_id(node)
request_elm = ET.Element('cleanServer',
{'xmlns': TYPES_URN, 'id': node_id})
body = self.connection.request_with_orgId_api_2(
'server/cleanServer',
method='POST',
data=ET.tostring(request_elm)).object
response_code = findtext(body, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_customer_images(self, location=None):
"""
Return a list of customer imported images
:param location: The target location
:type location: :class:`NodeLocation` or ``str``
:rtype: ``list`` of :class:`NodeImage`
"""
params = {}
if location is not None:
params['datacenterId'] = self._location_to_location_id(location)
return self._to_images(
self.connection.request_with_orgId_api_2(
'image/customerImage',
params=params)
.object, 'customerImage')
def ex_get_base_image_by_id(self, id):
"""
Gets a Base image in the Dimension Data Cloud given the id
:param id: The id of the image
:type id: ``str``
:rtype: :class:`NodeImage`
"""
image = self.connection.request_with_orgId_api_2(
'image/osImage/%s' % id).object
return self._to_image(image)
def ex_get_customer_image_by_id(self, id):
"""
Gets a Customer image in the Dimension Data Cloud given the id
:param id: The id of the image
:type id: ``str``
:rtype: :class:`NodeImage`
"""
image = self.connection.request_with_orgId_api_2(
'image/customerImage/%s' % id).object
return self._to_image(image)
def ex_get_image_by_id(self, id):
"""
Gets a Base/Customer image in the Dimension Data Cloud given the id
Note: This first checks the base image
If it is not a base image we check if it is a customer image
If it is not in either of these a DimensionDataAPIException
is thrown
:param id: The id of the image
:type id: ``str``
:rtype: :class:`NodeImage`
"""
try:
return self.ex_get_base_image_by_id(id)
except DimensionDataAPIException as e:
if e.code != 'RESOURCE_NOT_FOUND':
raise e
return self.ex_get_customer_image_by_id(id)
def ex_create_tag_key(self, name, description=None,
value_required=True, display_on_report=True):
"""
Creates a tag key in the Dimension Data Cloud
:param name: The name of the tag key (required)
:type name: ``str``
:param description: The description of the tag key
:type description: ``str``
:param value_required: If a value is required for the tag
Tags themselves can be just a tag,
or be a key/value pair
:type value_required: ``bool``
:param display_on_report: Should this key show up on the usage reports
:type display_on_report: ``bool``
:rtype: ``bool``
"""
create_tag_key = ET.Element('createTagKey', {'xmlns': TYPES_URN})
ET.SubElement(create_tag_key, 'name').text = name
if description is not None:
ET.SubElement(create_tag_key, 'description').text = description
ET.SubElement(create_tag_key, 'valueRequired').text = \
str(value_required).lower()
ET.SubElement(create_tag_key, 'displayOnReport').text = \
str(display_on_report).lower()
response = self.connection.request_with_orgId_api_2(
'tag/createTagKey',
method='POST',
data=ET.tostring(create_tag_key)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_tag_keys(self, id=None, name=None,
value_required=None, display_on_report=None):
"""
List tag keys in the Dimension Data Cloud
:param id: Filter the list to the id of the tag key
:type id: ``str``
:param name: Filter the list to the name of the tag key
:type name: ``str``
:param value_required: Filter the list to if a value is required
for a tag key
:type value_required: ``bool``
:param display_on_report: Filter the list to if the tag key should
show up on usage reports
:type display_on_report: ``bool``
:rtype: ``list`` of :class:`DimensionDataTagKey`
"""
params = {}
if id is not None:
params['id'] = id
if name is not None:
params['name'] = name
if value_required is not None:
params['valueRequired'] = str(value_required).lower()
if display_on_report is not None:
params['displayOnReport'] = str(display_on_report).lower()
paged_result = self.connection.paginated_request_with_orgId_api_2(
'tag/tagKey',
method='GET',
params=params
)
tag_keys = []
for result in paged_result:
tag_keys.extend(self._to_tag_keys(result))
return tag_keys
def ex_get_tag_key_by_id(self, id):
"""
Get a specific tag key by ID
:param id: ID of the tag key you want (required)
:type id: ``str``
:rtype: :class:`DimensionDataTagKey`
"""
tag_key = self.connection.request_with_orgId_api_2(
'tag/tagKey/%s' % id).object
return self._to_tag_key(tag_key)
def ex_get_tag_key_by_name(self, name):
"""
Get a specific tag key by Name
:param name: Name of the tag key you want (required)
:type name: ``str``
:rtype: :class:`DimensionDataTagKey`
"""
tag_keys = self.ex_list_tag_keys(name=name)
if len(tag_keys) != 1:
raise ValueError("No tags found with name %s" % name)
return tag_keys[0]
def ex_modify_tag_key(self, tag_key, name=None, description=None,
value_required=None, display_on_report=None):
"""
Modify a specific tag key
:param tag_key: The tag key you want to modify (required)
:type tag_key: :class:`DimensionDataTagKey` or ``str``
:param name: Set to modifiy the name of the tag key
:type name: ``str``
:param description: Set to modify the description of the tag key
:type description: ``str``
:param value_required: Set to modify if a value is required for
the tag key
:type value_required: ``bool``
:param display_on_report: Set to modify if this tag key should display
on the usage reports
:type display_on_report: ``bool``
:rtype: ``bool``
"""
tag_key_id = self._tag_key_to_tag_key_id(tag_key)
modify_tag_key = ET.Element('editTagKey',
{'xmlns': TYPES_URN, 'id': tag_key_id})
if name is not None:
ET.SubElement(modify_tag_key, 'name').text = name
if description is not None:
ET.SubElement(modify_tag_key, 'description').text = description
if value_required is not None:
ET.SubElement(modify_tag_key, 'valueRequired').text = \
str(value_required).lower()
if display_on_report is not None:
ET.SubElement(modify_tag_key, 'displayOnReport').text = \
str(display_on_report).lower()
response = self.connection.request_with_orgId_api_2(
'tag/editTagKey',
method='POST',
data=ET.tostring(modify_tag_key)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_remove_tag_key(self, tag_key):
"""
Modify a specific tag key
:param tag_key: The tag key you want to remove (required)
:type tag_key: :class:`DimensionDataTagKey` or ``str``
:rtype: ``bool``
"""
tag_key_id = self._tag_key_to_tag_key_id(tag_key)
remove_tag_key = ET.Element('deleteTagKey',
{'xmlns': TYPES_URN, 'id': tag_key_id})
response = self.connection.request_with_orgId_api_2(
'tag/deleteTagKey',
method='POST',
data=ET.tostring(remove_tag_key)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_apply_tag_to_asset(self, asset, tag_key, value=None):
"""
Apply a tag to a Dimension Data Asset
:param asset: The asset to apply a tag to. (required)
:type asset: :class:`Node` or :class:`NodeImage` or
:class:`DimensionDataNewtorkDomain` or
:class:`DimensionDataVlan` or
:class:`DimensionDataPublicIpBlock`
:param tag_key: The tag_key to apply to the asset. (required)
:type tag_key: :class:`DimensionDataTagKey` or ``str``
:param value: The value to be assigned to the tag key
This is only required if the :class:`DimensionDataTagKey`
requires it
:type value: ``str``
:rtype: ``bool``
"""
asset_type = self._get_tagging_asset_type(asset)
tag_key_name = self._tag_key_to_tag_key_name(tag_key)
apply_tags = ET.Element('applyTags', {'xmlns': TYPES_URN})
ET.SubElement(apply_tags, 'assetType').text = asset_type
ET.SubElement(apply_tags, 'assetId').text = asset.id
tag_ele = ET.SubElement(apply_tags, 'tag')
ET.SubElement(tag_ele, 'tagKeyName').text = tag_key_name
if value is not None:
ET.SubElement(tag_ele, 'value').text = value
response = self.connection.request_with_orgId_api_2(
'tag/applyTags',
method='POST',
data=ET.tostring(apply_tags)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_remove_tag_from_asset(self, asset, tag_key):
"""
Remove a tag from an asset
:param asset: The asset to remove a tag from. (required)
:type asset: :class:`Node` or :class:`NodeImage` or
:class:`DimensionDataNewtorkDomain` or
:class:`DimensionDataVlan` or
:class:`DimensionDataPublicIpBlock`
:param tag_key: The tag key you want to remove (required)
:type tag_key: :class:`DimensionDataTagKey` or ``str``
:rtype: ``bool``
"""
asset_type = self._get_tagging_asset_type(asset)
tag_key_name = self._tag_key_to_tag_key_name(tag_key)
apply_tags = ET.Element('removeTags', {'xmlns': TYPES_URN})
ET.SubElement(apply_tags, 'assetType').text = asset_type
ET.SubElement(apply_tags, 'assetId').text = asset.id
ET.SubElement(apply_tags, 'tagKeyName').text = tag_key_name
response = self.connection.request_with_orgId_api_2(
'tag/removeTags',
method='POST',
data=ET.tostring(apply_tags)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_tags(self, asset_id=None, asset_type=None, location=None,
tag_key_name=None, tag_key_id=None, value=None,
value_required=None, display_on_report=None):
"""
List tags in the Dimension Data Cloud
:param asset_id: Filter the list by asset id
:type asset_id: ``str``
:param asset_type: Filter the list by asset type
:type asset_type: ``str``
:param location: Filter the list by the assets location
:type location: :class:``NodeLocation`` or ``str``
:param tag_key_name: Filter the list by a tag key name
:type tag_key_name: ``str``
:param tag_key_id: Filter the list by a tag key id
:type tag_key_id: ``str``
:param value: Filter the list by a tag value
:type value: ``str``
:param value_required: Filter the list to if a value is required
for a tag
:type value_required: ``bool``
:param display_on_report: Filter the list to if the tag should
show up on usage reports
:type display_on_report: ``bool``
:rtype: ``list`` of :class:`DimensionDataTag`
"""
params = {}
if asset_id is not None:
params['assetId'] = asset_id
if asset_type is not None:
params['assetType'] = asset_type
if location is not None:
params['datacenterId'] = self._location_to_location_id(location)
if tag_key_name is not None:
params['tagKeyName'] = tag_key_name
if tag_key_id is not None:
params['tagKeyId'] = tag_key_id
if value is not None:
params['value'] = value
if value_required is not None:
params['valueRequired'] = str(value_required).lower()
if display_on_report is not None:
params['displayOnReport'] = str(display_on_report).lower()
paged_result = self.connection.paginated_request_with_orgId_api_2(
'tag/tag',
method='GET',
params=params
)
tags = []
for result in paged_result:
tags.extend(self._to_tags(result))
return tags
def ex_summary_usage_report(self, start_date, end_date):
"""
Get summary usage information
:param start_date: Start date for the report
:type start_date: ``str`` in format YYYY-MM-DD
:param end_date: End date for the report
:type end_date: ``str`` in format YYYY-MM-DD
:rtype: ``list`` of ``list``
"""
result = self.connection.raw_request_with_orgId_api_1(
'report/usage?startDate=%s&endDate=%s' % (
start_date, end_date))
return self._format_csv(result.response)
def ex_detailed_usage_report(self, start_date, end_date):
"""
Get detailed usage information
:param start_date: Start date for the report
:type start_date: ``str`` in format YYYY-MM-DD
:param end_date: End date for the report
:type end_date: ``str`` in format YYYY-MM-DD
:rtype: ``list`` of ``list``
"""
result = self.connection.raw_request_with_orgId_api_1(
'report/usageDetailed?startDate=%s&endDate=%s' % (
start_date, end_date))
return self._format_csv(result.response)
def ex_software_usage_report(self, start_date, end_date):
"""
Get detailed software usage reports
:param start_date: Start date for the report
:type start_date: ``str`` in format YYYY-MM-DD
:param end_date: End date for the report
:type end_date: ``str`` in format YYYY-MM-DD
:rtype: ``list`` of ``list``
"""
result = self.connection.raw_request_with_orgId_api_1(
'report/usageSoftwareUnits?startDate=%s&endDate=%s' % (
start_date, end_date))
return self._format_csv(result.response)
def ex_audit_log_report(self, start_date, end_date):
"""
Get audit log report
:param start_date: Start date for the report
:type start_date: ``str`` in format YYYY-MM-DD
:param end_date: End date for the report
:type end_date: ``str`` in format YYYY-MM-DD
:rtype: ``list`` of ``list``
"""
result = self.connection.raw_request_with_orgId_api_1(
'auditlog?startDate=%s&endDate=%s' % (
start_date, end_date))
return self._format_csv(result.response)
    def ex_backup_usage_report(self, start_date, end_date, location):
        """
        Get the detailed backup usage report for a datacenter.

        :param start_date: Start date for the report
        :type start_date: ``str`` in format YYYY-MM-DD
        :param end_date: End date for the report
        :type end_date: ``str`` in format YYYY-MM-DD
        :keyword location: Filters the node list to nodes that are
                           located in this location
        :type location: :class:`NodeLocation` or ``str``

        :rtype: ``list`` of ``list``
        """
        # Accepts either a NodeLocation object or a plain datacenter ID
        datacenter_id = self._location_to_location_id(location)
        result = self.connection.raw_request_with_orgId_api_1(
            'backup/detailedUsageReport?datacenterId=%s&fromDate=%s&toDate=%s'
            % (datacenter_id, start_date, end_date))
        # The report comes back as CSV; parse it into rows of fields
        return self._format_csv(result.response)
    def ex_list_ip_address_list(self, ex_network_domain):
        """
        List IP Address List by network domain ID specified.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
                              networkDomainName][0]
        >>>
        >>> # List IP Address List of network domain
        >>> ipaddresslist_list = driver.ex_list_ip_address_list(
        >>>     ex_network_domain=my_network_domain)
        >>> pprint(ipaddresslist_list)

        :param ex_network_domain: The network domain or network domain ID
        :type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str'

        :return: a list of DimensionDataIpAddressList objects
        :rtype: ``list`` of :class:`DimensionDataIpAddressList`
        """
        # Accepts either a network-domain object or a plain ID string
        params = {'networkDomainId': self._network_domain_to_network_domain_id(
            ex_network_domain)}
        response = self.connection.request_with_orgId_api_2(
            'network/ipAddressList', params=params).object
        return self._to_ip_address_lists(response)
    def ex_get_ip_address_list(self, ex_network_domain,
                               ex_ip_address_list_name):
        """
        Get IP Address List by name in network domain specified.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
                              networkDomainName][0]
        >>>
        >>> # Get IP Address List by Name
        >>> ipaddresslist_list_by_name = driver.ex_get_ip_address_list(
        >>>     ex_network_domain=my_network_domain,
        >>>     ex_ip_address_list_name='My_IP_AddressList_1')
        >>> pprint(ipaddresslist_list_by_name)

        :param ex_network_domain: (required) The network domain or network
                                  domain ID in which ipaddresslist resides.
        :type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str'
        :param ex_ip_address_list_name: (required) Get 'IP Address List' by
                                        name
        :type ex_ip_address_list_name: :``str``

        :return: a list of DimensionDataIpAddressList objects
                 (despite the "get" name, all lists whose name matches
                 exactly are returned; empty list when none match)
        :rtype: ``list`` of :class:`DimensionDataIpAddressList`
        """
        # Fetch every list in the domain, then filter client-side by
        # exact name match
        ip_address_lists = self.ex_list_ip_address_list(ex_network_domain)
        return list(filter(lambda x: x.name == ex_ip_address_list_name,
                           ip_address_lists))
def ex_create_ip_address_list(self, ex_network_domain, name,
description,
ip_version, ip_address_collection,
child_ip_address_list=None):
"""
Create IP Address List. IP Address list.
>>> from pprint import pprint
>>> from libcloud.compute.types import Provider
>>> from libcloud.compute.providers import get_driver
>>> from libcloud.common.dimensiondata import DimensionDataIpAddress
>>> import libcloud.security
>>>
>>> # Get dimension data driver
>>> libcloud.security.VERIFY_SSL_CERT = True
>>> cls = get_driver(Provider.DIMENSIONDATA)
>>> driver = cls('myusername','mypassword', region='dd-au')
>>>
>>> # Get location
>>> location = driver.ex_get_location_by_id(id='AU9')
>>>
>>> # Get network domain by location
>>> networkDomainName = "Baas QA"
>>> network_domains = driver.ex_list_network_domains(location=location)
>>> my_network_domain = [d for d in network_domains if d.name ==
networkDomainName][0]
>>>
>>> # IP Address collection
>>> ipAddress_1 = DimensionDataIpAddress(begin='190.2.2.100')
>>> ipAddress_2 = DimensionDataIpAddress(begin='190.2.2.106',
end='190.2.2.108')
>>> ipAddress_3 = DimensionDataIpAddress(begin='190.2.2.0',
prefix_size='24')
>>> ip_address_collection = [ipAddress_1, ipAddress_2, ipAddress_3]
>>>
>>> # Create IPAddressList
>>> result = driver.ex_create_ip_address_list(
>>> ex_network_domain=my_network_domain,
>>> name='My_IP_AddressList_2',
>>> ip_version='IPV4',
>>> description='Test only',
>>> ip_address_collection=ip_address_collection,
>>> child_ip_address_list='08468e26-eeb3-4c3d-8ff2-5351fa6d8a04'
>>> )
>>>
>>> pprint(result)
:param ex_network_domain: The network domain or network domain ID
:type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str'
:param name: IP Address List Name (required)
:type name: :``str``
:param description: IP Address List Description (optional)
:type description: :``str``
:param ip_version: IP Version of ip address (required)
:type ip_version: :``str``
:param ip_address_collection: List of IP Address. At least one
ipAddress element or one
childIpAddressListId element must
be provided.
:type ip_address_collection: :``str``
:param child_ip_address_list: Child IP Address List or id to be
included in this IP Address List.
At least one ipAddress or
one childIpAddressListId
must be provided.
:type child_ip_address_list:
:class:'DimensionDataChildIpAddressList` or `str``
:return: a list of DimensionDataIpAddressList objects
:rtype: ``list`` of :class:`DimensionDataIpAddressList`
"""
if (ip_address_collection is None and
child_ip_address_list is None):
raise ValueError("At least one ipAddress element or one "
"childIpAddressListId element must be "
"provided.")
create_ip_address_list = ET.Element('createIpAddressList',
{'xmlns': TYPES_URN})
ET.SubElement(
create_ip_address_list,
'networkDomainId'
).text = self._network_domain_to_network_domain_id(ex_network_domain)
ET.SubElement(
create_ip_address_list,
'name'
).text = name
ET.SubElement(
create_ip_address_list,
'description'
).text = description
ET.SubElement(
create_ip_address_list,
'ipVersion'
).text = ip_version
for ip in ip_address_collection:
ip_address = ET.SubElement(
create_ip_address_list,
'ipAddress',
)
ip_address.set('begin', ip.begin)
if ip.end:
ip_address.set('end', ip.end)
if ip.prefix_size:
ip_address.set('prefixSize', ip.prefix_size)
if child_ip_address_list is not None:
ET.SubElement(
create_ip_address_list,
'childIpAddressListId'
).text = \
self._child_ip_address_list_to_child_ip_address_list_id(
child_ip_address_list)
response = self.connection.request_with_orgId_api_2(
'network/createIpAddressList',
method='POST',
data=ET.tostring(create_ip_address_list)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
    def ex_edit_ip_address_list(self, ex_ip_address_list, description,
                                ip_address_collection,
                                child_ip_address_lists=None):
        """
        Edit IP Address List.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> from libcloud.common.dimensiondata import DimensionDataIpAddress
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # IP Address collection
        >>> ipAddress_1 = DimensionDataIpAddress(begin='190.2.2.100')
        >>> ipAddress_2 = DimensionDataIpAddress(begin='190.2.2.106',
        >>>                                      end='190.2.2.108')
        >>> ipAddress_3 = DimensionDataIpAddress(
        >>>     begin='190.2.2.0', prefix_size='24')
        >>> ip_address_collection = [ipAddress_1, ipAddress_2, ipAddress_3]
        >>>
        >>> # Edit IP Address List
        >>> ip_address_list_id = '5e7c323f-c885-4e4b-9a27-94c44217dbd3'
        >>> result = driver.ex_edit_ip_address_list(
        >>>     ex_ip_address_list=ip_address_list_id,
        >>>     description="Edit Test",
        >>>     ip_address_collection=ip_address_collection,
        >>>     child_ip_address_lists=None
        >>> )
        >>> pprint(result)

        :param ex_ip_address_list: (required) IpAddressList object or
                                   IpAddressList ID
        :type ex_ip_address_list: :class:'DimensionDataIpAddressList'
            or ``str``
        :param description: IP Address List Description
        :type description: :``str``
        :param ip_address_collection: List of IP Address
        :type ip_address_collection: ''list'' of
            :class:'DimensionDataIpAddressList'
        :param child_ip_address_lists: Child IP Address List or id to be
                                       included in this IP Address List
        :type child_ip_address_lists: ``list`` of
            :class:'DimensionDataChildIpAddressList'
            or ``str``

        :return: True when the edit request was accepted
        :rtype: ``bool``
        """
        # The xsi namespace is needed so an explicit xsi:nil can be sent
        # below when the caller wants the child list cleared.
        edit_ip_address_list = ET.Element(
            'editIpAddressList',
            {'xmlns': TYPES_URN,
             "id": self._ip_address_list_to_ip_address_list_id(
                 ex_ip_address_list),
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"
             })
        ET.SubElement(
            edit_ip_address_list,
            'description'
        ).text = description
        # NOTE(review): ip_address_collection is treated as required here
        # (iterated unconditionally) — passing None raises TypeError.
        for ip in ip_address_collection:
            ip_address = ET.SubElement(
                edit_ip_address_list,
                'ipAddress',
            )
            ip_address.set('begin', ip.begin)
            if ip.end:
                ip_address.set('end', ip.end)
            if ip.prefix_size:
                ip_address.set('prefixSize', ip.prefix_size)
        if child_ip_address_lists is not None:
            ET.SubElement(
                edit_ip_address_list,
                'childIpAddressListId'
            ).text = self._child_ip_address_list_to_child_ip_address_list_id(
                child_ip_address_lists)
        else:
            # Explicit nil element tells the API to remove any existing
            # child IP address list association
            ET.SubElement(
                edit_ip_address_list,
                'childIpAddressListId',
                {'xsi:nil': 'true'}
            )
        response = self.connection.request_with_orgId_api_2(
            'network/editIpAddressList',
            method='POST',
            data=ET.tostring(edit_ip_address_list)).object
        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']
def ex_delete_ip_address_list(self, ex_ip_address_list):
"""
Delete IP Address List by ID
>>> from pprint import pprint
>>> from libcloud.compute.types import Provider
>>> from libcloud.compute.providers import get_driver
>>> import libcloud.security
>>>
>>> # Get dimension data driver
>>> libcloud.security.VERIFY_SSL_CERT = True
>>> cls = get_driver(Provider.DIMENSIONDATA)
>>> driver = cls('myusername','mypassword', region='dd-au')
>>>
>>> ip_address_list_id = '5e7c323f-c885-4e4b-9a27-94c44217dbd3'
>>> result = driver.ex_delete_ip_address_list(ip_address_list_id)
>>> pprint(result)
:param ex_ip_address_list: IP Address List object or IP Address
List ID (required)
:type ex_ip_address_list: :class:'DimensionDataIpAddressList'
or ``str``
:rtype: ``bool``
"""
delete_ip_address_list = \
ET.Element('deleteIpAddressList', {'xmlns': TYPES_URN, 'id': self
._ip_address_list_to_ip_address_list_id(
ex_ip_address_list)})
response = self.connection.request_with_orgId_api_2(
'network/deleteIpAddressList',
method='POST',
data=ET.tostring(delete_ip_address_list)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
    def ex_list_portlist(self, ex_network_domain):
        """
        List Portlist by network domain ID specified.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
        >>>                      networkDomainName][0]
        >>>
        >>> # List portlist
        >>> portLists = driver.ex_list_portlist(
        >>>     ex_network_domain=my_network_domain)
        >>> pprint(portLists)
        >>>

        :param ex_network_domain: The network domain or network domain ID
        :type ex_network_domain: :class:`DimensionDataNetworkDomain` or 'str'

        :return: a list of DimensionDataPortList objects
        :rtype: ``list`` of :class:`DimensionDataPortList`
        """
        # Accepts either a network-domain object or a plain ID string
        params = {'networkDomainId':
                  self._network_domain_to_network_domain_id(ex_network_domain)}
        response = self.connection.request_with_orgId_api_2(
            'network/portList', params=params).object
        return self._to_port_lists(response)
    def ex_get_portlist(self, ex_portlist_id):
        """
        Get Port List.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get specific portlist by ID
        >>> portlist_id = '27dd8c66-80ff-496b-9f54-2a3da2fe679e'
        >>> portlist = driver.ex_get_portlist(portlist_id)
        >>> pprint(portlist)

        :param ex_portlist_id: The ex_port_list or ex_port_list ID
        :type ex_portlist_id: :class:`DimensionDataNetworkDomain` or 'str'

        :return: DimensionDataPortList object
        :rtype: :class:`DimensionDataPort`
        """
        # NOTE(review): unlike sibling methods, the argument is NOT
        # resolved object->ID here — it is interpolated into the URL
        # directly, so callers should pass the ID string; confirm before
        # relying on passing a port-list object.
        url_path = ('network/portList/%s' % ex_portlist_id)
        response = self.connection.request_with_orgId_api_2(
            url_path).object
        return self._to_port_list(response)
    def ex_create_portlist(self, ex_network_domain, name, description,
                           port_collection, child_portlist_list=None):
        """
        Create Port List.

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> from libcloud.common.dimensiondata import DimensionDataPort
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Get location
        >>> location = driver.ex_get_location_by_id(id='AU9')
        >>>
        >>> # Get network domain by location
        >>> networkDomainName = "Baas QA"
        >>> network_domains = driver.ex_list_network_domains(location=location)
        >>> my_network_domain = [d for d in network_domains if d.name ==
                              networkDomainName][0]
        >>>
        >>> # Port Collection
        >>> port_1 = DimensionDataPort(begin='1000')
        >>> port_2 = DimensionDataPort(begin='1001', end='1003')
        >>> port_collection = [port_1, port_2]
        >>>
        >>> # Create Port List
        >>> new_portlist = driver.ex_create_portlist(
        >>>     ex_network_domain=my_network_domain,
        >>>     name='MyPortListX',
        >>>     description="Test only",
        >>>     port_collection=port_collection,
        >>>     child_portlist_list={'a9cd4984-6ff5-4f93-89ff-8618ab642bb9'}
        >>> )
        >>> pprint(new_portlist)

        :param ex_network_domain: (required) The network domain in
                                  which to create PortList. Provide
                                  networkdomain object or its id.
        :type ex_network_domain: :``str``
        :param name: Port List Name
        :type name: :``str``
        :param description: IP Address List Description
        :type description: :``str``
        :param port_collection: List of Port Address
        :type port_collection: :``str``
        :param child_portlist_list: List of Child Portlist to be
                                    included in this Port List
        :type child_portlist_list: :``str`` or ''list of
            :class:'DimensionDataChildPortList'

        :return: result of operation
        :rtype: ``bool``
        """
        new_port_list = ET.Element('createPortList', {'xmlns': TYPES_URN})
        # Accepts either a network-domain object or a plain ID string
        ET.SubElement(
            new_port_list,
            'networkDomainId'
        ).text = self._network_domain_to_network_domain_id(ex_network_domain)
        ET.SubElement(
            new_port_list,
            'name'
        ).text = name
        ET.SubElement(
            new_port_list,
            'description'
        ).text = description
        # Each port becomes a <port begin="..." [end="..."]/> element
        for port in port_collection:
            p = ET.SubElement(
                new_port_list,
                'port'
            )
            p.set('begin', port.begin)
            if port.end:
                p.set('end', port.end)
        # Child port lists (objects or IDs) are referenced by ID
        if child_portlist_list is not None:
            for child in child_portlist_list:
                ET.SubElement(
                    new_port_list,
                    'childPortListId'
                ).text = self._child_port_list_to_child_port_list_id(child)
        response = self.connection.request_with_orgId_api_2(
            'network/createPortList',
            method='POST',
            data=ET.tostring(new_port_list)).object
        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']
    def ex_edit_portlist(self, ex_portlist, description,
                         port_collection, child_portlist_list=None):
        """
        Edit an existing Port List.

        The supplied ``port_collection`` and ``child_portlist_list``
        *replace* the list's current contents; passing
        ``child_portlist_list=None`` explicitly clears the child lists
        (an ``xsi:nil`` element is sent).

        >>> from pprint import pprint
        >>> from libcloud.compute.types import Provider
        >>> from libcloud.compute.providers import get_driver
        >>> from libcloud.common.dimensiondata import DimensionDataPort
        >>> import libcloud.security
        >>>
        >>> # Get dimension data driver
        >>> libcloud.security.VERIFY_SSL_CERT = True
        >>> cls = get_driver(Provider.DIMENSIONDATA)
        >>> driver = cls('myusername','mypassword', region='dd-au')
        >>>
        >>> # Port Collection
        >>> port_1 = DimensionDataPort(begin='4200')
        >>> port_2 = DimensionDataPort(begin='4201', end='4210')
        >>> port_collection = [port_1, port_2]
        >>>
        >>> # Edit Port List
        >>> editPortlist = driver.ex_get_portlist(
            '27dd8c66-80ff-496b-9f54-2a3da2fe679e')
        >>>
        >>> result = driver.ex_edit_portlist(
        >>>     ex_portlist=editPortlist.id,
        >>>     description="Make Changes in portlist",
        >>>     port_collection=port_collection,
        >>>     child_portlist_list={'a9cd4984-6ff5-4f93-89ff-8618ab642bb9'}
        >>> )
        >>> pprint(result)

        :param ex_portlist: (required) Port List (or its id) to be edited
        :type  ex_portlist: ``str`` or :class:`DimensionDataPortList`

        :param description: Port List description
        :type  description: ``str``

        :param port_collection: List of ports making up the list
        :type  port_collection: ``list`` of :class:`DimensionDataPort`

        :param child_portlist_list: Child Port Lists (or their ids) to be
                                    included in this Port List; ``None``
                                    removes all child lists
        :type  child_portlist_list: ``list`` of
            :class:`DimensionDataChildPortList` or ``str``, or ``None``

        :return: True if the edit request was accepted
                 (response code IN_PROGRESS or OK)
        :rtype: ``bool``
        """
        existing_port_address_list = ET.Element(
            'editPortList',
            {
                "id": self._port_list_to_port_list_id(ex_portlist),
                'xmlns': TYPES_URN,
                'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"
            })

        ET.SubElement(
            existing_port_address_list,
            'description'
        ).text = description

        for port in port_collection:
            p = ET.SubElement(
                existing_port_address_list,
                'port'
            )
            p.set('begin', port.begin)

            if port.end:
                p.set('end', port.end)

        if child_portlist_list is not None:
            for child in child_portlist_list:
                ET.SubElement(
                    existing_port_address_list,
                    'childPortListId'
                ).text = self._child_port_list_to_child_port_list_id(child)
        else:
            # Nil element explicitly clears existing child port lists.
            ET.SubElement(
                existing_port_address_list,
                'childPortListId',
                {'xsi:nil': 'true'}
            )

        response = self.connection.request_with_orgId_api_2(
            'network/editPortList',
            method='POST',
            data=ET.tostring(existing_port_address_list)).object

        response_code = findtext(response, 'responseCode', TYPES_URN)
        return response_code in ['IN_PROGRESS', 'OK']
def ex_delete_portlist(self, ex_portlist):
"""
Delete Port List
>>> from pprint import pprint
>>> from libcloud.compute.types import Provider
>>> from libcloud.compute.providers import get_driver
>>> import libcloud.security
>>>
>>> # Get dimension data driver
>>> libcloud.security.VERIFY_SSL_CERT = True
>>> cls = get_driver(Provider.DIMENSIONDATA)
>>> driver = cls('myusername','mypassword', region='dd-au')
>>>
>>> # Delete Port List
>>> portlist_id = '157531ce-77d4-493c-866b-d3d3fc4a912a'
>>> response = driver.ex_delete_portlist(portlist_id)
>>> pprint(response)
:param ex_portlist: Port List to be deleted
:type ex_portlist: :``str`` or :class:'DimensionDataPortList'
:rtype: ``bool``
"""
delete_port_list = ET.Element(
'deletePortList',
{'xmlns': TYPES_URN,
'id': self._port_list_to_port_list_id(ex_portlist)})
response = self.connection.request_with_orgId_api_2(
'network/deletePortList',
method='POST',
data=ET.tostring(delete_port_list)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_exchange_nic_vlans(self, nic_id_1, nic_id_2):
"""
Exchange NIC Vlans
:param nic_id_1: Nic ID 1
:type nic_id_1: :``str``
:param nic_id_2: Nic ID 2
:type nic_id_2: :``str``
:rtype: ``bool``
"""
exchange_elem = ET.Element(
'urn:exchangeNicVlans',
{
'xmlns:urn': TYPES_URN,
})
ET.SubElement(exchange_elem, 'urn:nicId1').text = nic_id_1
ET.SubElement(exchange_elem, 'urn:nicId2').text = nic_id_2
response = self.connection.request_with_orgId_api_2(
'server/exchangeNicVlans',
method='POST',
data=ET.tostring(exchange_elem)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_change_nic_network_adapter(self, nic_id, network_adapter_name):
"""
Change network adapter of a NIC on a cloud server
:param nic_id: Nic ID
:type nic_id: :``str``
:param network_adapter_name: Network adapter name
:type network_adapter_name: :``str``
:rtype: ``bool``
"""
change_elem = ET.Element(
'changeNetworkAdapter',
{
'nicId': nic_id,
'xmlns': TYPES_URN
})
ET.SubElement(change_elem, 'networkAdapter').text = \
network_adapter_name
response = self.connection.request_with_orgId_api_2(
'server/changeNetworkAdapter',
method='POST',
data=ET.tostring(change_elem)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_create_node_uncustomized(self,
name,
image,
ex_network_domain,
ex_is_started=True,
ex_description=None,
ex_cluster_id=None,
ex_cpu_specification=None,
ex_memory_gb=None,
ex_primary_nic_private_ipv4=None,
ex_primary_nic_vlan=None,
ex_primary_nic_network_adapter=None,
ex_additional_nics=None,
ex_disks=None,
ex_tagid_value_pairs=None,
ex_tagname_value_pairs=None
):
"""
This MCP 2.0 only function deploys a new Cloud Server from a
CloudControl compatible Server Image, which does not utilize
VMware Guest OS Customization process.
Create Node in MCP2 Data Center
:keyword name: (required) String with a name for this new node
:type name: ``str``
:keyword image: (UUID of the Server Image being used as the target
for the new Server deployment. The source Server
Image (OS Image or Customer Image) must have
osCustomization set to true. See Get/List OS
Image(s) and Get/List Customer Image(s).
:type image: :class:`NodeImage` or ``str``
:keyword ex_network_domain: (required) Network Domain or Network
Domain ID to create the node
:type ex_network_domain: :class:`DimensionDataNetworkDomain`
or ``str``
:keyword ex_description: (optional) description for this node
:type ex_description: ``str``
:keyword ex_cluster_id: (optional) For multiple cluster
environments, it is possible to set a destination cluster for the new
Customer Image. Note that performance of this function is optimal when
either the Server cluster and destination are the same or when shared
data storage is in place for the multiple clusters.
:type ex_cluster_id: ``str``
:keyword ex_primary_nic_private_ipv4: Provide private IPv4. Ignore
if ex_primary_nic_vlan is
provided. Use one or the
other. Not both.
:type ex_primary_nic_private_ipv4: :``str``
:keyword ex_primary_nic_vlan: Provide VLAN for the node if
ex_primary_nic_private_ipv4 NOT
provided. One or the other. Not both.
:type ex_primary_nic_vlan: :class: DimensionDataVlan or ``str``
:keyword ex_primary_nic_network_adapter: (Optional) Default value
for the Operating System
will be used if leave
empty. Example: "E1000".
:type ex_primary_nic_network_adapter: :``str``
:keyword ex_additional_nics: (optional) List
:class:'DimensionDataNic' or None
:type ex_additional_nics: ``list`` of :class:'DimensionDataNic'
or ``str``
:keyword ex_memory_gb: (optional) The amount of memory in GB for
the server Can be used to override the
memory value inherited from the source
Server Image.
:type ex_memory_gb: ``int``
:keyword ex_cpu_specification: (optional) The spec of CPU to deploy
:type ex_cpu_specification:
:class:`DimensionDataServerCpuSpecification`
:keyword ex_is_started: (required) Start server after creation.
Default is set to true.
:type ex_is_started: ``bool``
:keyword ex_disks: (optional) Dimensiondata disks. Optional disk
elements can be used to define the disk speed
that each disk on the Server; inherited from the
source Server Image will be deployed to. It is
not necessary to include a diskelement for every
disk; only those that you wish to set a disk
speed value for. Note that scsiId 7 cannot be
used.Up to 13 disks can be present in addition to
the required OS disk on SCSI ID 0. Refer to
https://docs.mcp-services.net/x/UwIu for disk
:type ex_disks: List or tuple of :class:'DimensionDataServerDisk`
:keyword ex_tagid_value_pairs:
(Optional) up to 10 tag elements may be provided.
A combination of tagById and tag name cannot be
supplied in the same request.
Note: ex_tagid_value_pairs and
ex_tagname_value_pairs is
mutually exclusive. Use one or other.
:type ex_tagname_value_pairs: ``dict``. Value can be None.
:keyword ex_tagname_value_pairs:
(Optional) up to 10 tag elements may be provided.
A combination of tagById and tag name cannot be
supplied in the same request.
Note: ex_tagid_value_pairs and
ex_tagname_value_pairs is
mutually exclusive. Use one or other.
:type ex_tagname_value_pairs: ``dict```.
:return: The newly created :class:`Node`.
:rtype: :class:`Node`
"""
# Unsupported for version lower than 2.4
if LooseVersion(self.connection.active_api_version) < LooseVersion(
'2.4'):
raise Exception("This feature is NOT supported in "
"earlier api version of 2.4")
# Default start to true if input is invalid
if not isinstance(ex_is_started, bool):
ex_is_started = True
print("Warning: ex_is_started input value is invalid. Default"
"to True")
server_uncustomized_elm = ET.Element('deployUncustomizedServer',
{'xmlns': TYPES_URN})
ET.SubElement(server_uncustomized_elm, "name").text = name
ET.SubElement(server_uncustomized_elm, "description").text = \
ex_description
image_id = self._image_to_image_id(image)
ET.SubElement(server_uncustomized_elm, "imageId").text = image_id
if ex_cluster_id:
dns_elm = ET.SubElement(server_uncustomized_elm, "primaryDns")
dns_elm.text = ex_cluster_id
if ex_is_started is not None:
ET.SubElement(server_uncustomized_elm, "start").text = str(
ex_is_started).lower()
if ex_cpu_specification is not None:
cpu = ET.SubElement(server_uncustomized_elm, "cpu")
cpu.set('speed', ex_cpu_specification.performance)
cpu.set('count', str(ex_cpu_specification.cpu_count))
cpu.set('coresPerSocket',
str(ex_cpu_specification.cores_per_socket))
if ex_memory_gb is not None:
ET.SubElement(server_uncustomized_elm, "memoryGb").text = \
str(ex_memory_gb)
if (ex_primary_nic_private_ipv4 is None and
ex_primary_nic_vlan is None):
raise ValueError("Missing argument. Either "
"ex_primary_nic_private_ipv4 or "
"ex_primary_nic_vlan "
"must be specified.")
if (ex_primary_nic_private_ipv4 is not None and
ex_primary_nic_vlan is not None):
raise ValueError("Either ex_primary_nic_private_ipv4 or "
"ex_primary_nic_vlan "
"be specified. Not both.")
network_elm = ET.SubElement(server_uncustomized_elm, "networkInfo")
net_domain_id = self._network_domain_to_network_domain_id(
ex_network_domain)
network_elm.set('networkDomainId', net_domain_id)
pri_nic = ET.SubElement(network_elm, 'primaryNic')
if ex_primary_nic_private_ipv4 is not None:
ET.SubElement(pri_nic,
'privateIpv4').text = ex_primary_nic_private_ipv4
if ex_primary_nic_vlan is not None:
vlan_id = self._vlan_to_vlan_id(ex_primary_nic_vlan)
ET.SubElement(pri_nic, 'vlanId').text = vlan_id
if ex_primary_nic_network_adapter is not None:
ET.SubElement(pri_nic,
"networkAdapter").text = \
ex_primary_nic_network_adapter
if isinstance(ex_additional_nics, (list, tuple)):
for nic in ex_additional_nics:
additional_nic = ET.SubElement(network_elm,
'additionalNic')
if (nic.private_ip_v4 is None and
nic.vlan is None):
raise ValueError("Either a vlan or private_ip_v4 "
"must be specified for each "
"additional nic.")
if (nic.private_ip_v4 is not None and
nic.vlan is not None):
raise ValueError("Either a vlan or private_ip_v4 "
"must be specified for each "
"additional nic. Not both.")
if nic.private_ip_v4 is not None:
ET.SubElement(additional_nic,
'privateIpv4').text = nic.private_ip_v4
if nic.vlan is not None:
vlan_id = self._vlan_to_vlan_id(nic.vlan)
ET.SubElement(additional_nic, 'vlanId').text = vlan_id
if nic.network_adapter_name is not None:
ET.SubElement(additional_nic,
"networkAdapter").text = \
nic.network_adapter_name
elif ex_additional_nics is not None:
raise TypeError(
"ex_additional_NICs must be None or tuple/list")
if isinstance(ex_disks, (list, tuple)):
for disk in ex_disks:
disk_elm = ET.SubElement(server_uncustomized_elm, 'disk')
disk_elm.set('scsiId', disk.scsi_id)
disk_elm.set('speed', disk.speed)
elif ex_disks is not None:
raise TypeError("ex_disks must be None or tuple/list")
# tagid and tagname value pair should not co-exists
if ex_tagid_value_pairs is not None and ex_tagname_value_pairs is \
not None:
raise ValueError("ex_tagid_value_pairs and ex_tagname_value_pairs"
"is mutually exclusive. Use one or the other.")
# Tag by ID
if ex_tagid_value_pairs is not None:
if not isinstance(ex_tagid_value_pairs, dict):
raise ValueError(
"ex_tagid_value_pairs must be a dictionary."
)
if sys.version_info[0] < 3:
tagid_items = ex_tagid_value_pairs.iteritems()
else:
tagid_items = ex_tagid_value_pairs.items()
for k, v in tagid_items:
tag_elem = ET.SubElement(server_uncustomized_elm, 'tagById')
ET.SubElement(tag_elem, 'tagKeyId').text = k
if v is not None:
ET.SubElement(tag_elem, 'value').text = v
if ex_tagname_value_pairs is not None:
if not isinstance(ex_tagname_value_pairs, dict):
raise ValueError(
"ex_tagname_value_pairs must be a dictionary"
)
if sys.version_info[0] < 3:
tags_items = ex_tagname_value_pairs.iteritems()
else:
tags_items = ex_tagname_value_pairs.items()
for k, v in tags_items:
tag_name_elem = ET.SubElement(server_uncustomized_elm, 'tag')
ET.SubElement(tag_name_elem, 'tagKeyName').text = k
if v is not None:
ET.SubElement(tag_name_elem, 'value').text = v
response = self.connection.request_with_orgId_api_2(
'server/deployUncustomizedServer',
method='POST',
data=ET.tostring(server_uncustomized_elm)).object
node_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'serverId':
node_id = info.get('value')
new_node = self.ex_get_node_by_id(node_id)
return new_node
def _format_csv(self, http_response):
text = http_response.read()
lines = str.splitlines(ensure_string(text))
return [line.split(',') for line in lines]
@staticmethod
def _get_tagging_asset_type(asset):
objecttype = type(asset)
if objecttype.__name__ in OBJECT_TO_TAGGING_ASSET_TYPE_MAP:
return OBJECT_TO_TAGGING_ASSET_TYPE_MAP[objecttype.__name__]
raise TypeError("Asset type %s cannot be tagged" % objecttype.__name__)
def _list_nodes_single_page(self, params={}):
nodes = self.connection.request_with_orgId_api_2(
'server/server', params=params).object
return nodes
def _to_tags(self, object):
tags = []
for element in object.findall(fixxpath('tag', TYPES_URN)):
tags.append(self._to_tag(element))
return tags
def _to_tag(self, element):
tag_key = self._to_tag_key(element, from_tag_api=True)
return DimensionDataTag(
asset_type=findtext(element, 'assetType', TYPES_URN),
asset_id=findtext(element, 'assetId', TYPES_URN),
asset_name=findtext(element, 'assetName', TYPES_URN),
datacenter=findtext(element, 'datacenterId', TYPES_URN),
key=tag_key,
value=findtext(element, 'value', TYPES_URN)
)
def _to_tag_keys(self, object):
keys = []
for element in object.findall(fixxpath('tagKey', TYPES_URN)):
keys.append(self._to_tag_key(element))
return keys
def _to_tag_key(self, element, from_tag_api=False):
if from_tag_api:
id = findtext(element, 'tagKeyId', TYPES_URN)
name = findtext(element, 'tagKeyName', TYPES_URN)
else:
id = element.get('id')
name = findtext(element, 'name', TYPES_URN)
return DimensionDataTagKey(
id=id,
name=name,
description=findtext(element, 'description', TYPES_URN),
value_required=self._str2bool(
findtext(element, 'valueRequired', TYPES_URN)
),
display_on_report=self._str2bool(
findtext(element, 'displayOnReport', TYPES_URN)
)
)
def _to_images(self, object, el_name='osImage'):
images = []
locations = self.list_locations()
for element in object.findall(fixxpath(el_name, TYPES_URN)):
images.append(self._to_image(element, locations))
return images
def _to_image(self, element, locations=None):
location_id = element.get('datacenterId')
if locations is None:
locations = self.list_locations(location_id)
location = list(filter(lambda x: x.id == location_id,
locations))[0]
cpu_spec = self._to_cpu_spec(element.find(fixxpath('cpu', TYPES_URN)))
if LooseVersion(self.connection.active_api_version) > LooseVersion(
'2.3'):
os_el = element.find(fixxpath('guest/operatingSystem', TYPES_URN))
else:
os_el = element.find(fixxpath('operatingSystem', TYPES_URN))
if element.tag.endswith('customerImage'):
is_customer_image = True
else:
is_customer_image = False
extra = {
'description': findtext(element, 'description', TYPES_URN),
'OS_type': os_el.get('family'),
'OS_displayName': os_el.get('displayName'),
'cpu': cpu_spec,
'memoryGb': findtext(element, 'memoryGb', TYPES_URN),
'osImageKey': findtext(element, 'osImageKey', TYPES_URN),
'created': findtext(element, 'createTime', TYPES_URN),
'location': location,
'isCustomerImage': is_customer_image
}
return NodeImage(id=element.get('id'),
name=str(findtext(element, 'name', TYPES_URN)),
extra=extra,
driver=self.connection.driver)
def _to_nat_rules(self, object, network_domain):
rules = []
for element in findall(object, 'natRule', TYPES_URN):
rules.append(
self._to_nat_rule(element, network_domain))
return rules
def _to_nat_rule(self, element, network_domain):
return DimensionDataNatRule(
id=element.get('id'),
network_domain=network_domain,
internal_ip=findtext(element, 'internalIp', TYPES_URN),
external_ip=findtext(element, 'externalIp', TYPES_URN),
status=findtext(element, 'state', TYPES_URN))
def _to_anti_affinity_rules(self, object):
rules = []
for element in findall(object, 'antiAffinityRule', TYPES_URN):
rules.append(
self._to_anti_affinity_rule(element))
return rules
def _to_anti_affinity_rule(self, element):
node_list = []
for node in findall(element, 'serverSummary', TYPES_URN):
node_list.append(node.get('id'))
return DimensionDataAntiAffinityRule(
id=element.get('id'),
node_list=node_list
)
def _to_firewall_rules(self, object, network_domain):
rules = []
locations = self.list_locations()
for element in findall(object, 'firewallRule', TYPES_URN):
rules.append(
self._to_firewall_rule(element, locations, network_domain))
return rules
def _to_firewall_rule(self, element, locations, network_domain):
location_id = element.get('datacenterId')
location = list(filter(lambda x: x.id == location_id,
locations))[0]
return DimensionDataFirewallRule(
id=element.get('id'),
network_domain=network_domain,
name=findtext(element, 'name', TYPES_URN),
action=findtext(element, 'action', TYPES_URN),
ip_version=findtext(element, 'ipVersion', TYPES_URN),
protocol=findtext(element, 'protocol', TYPES_URN),
enabled=findtext(element, 'enabled', TYPES_URN),
source=self._to_firewall_address(
element.find(fixxpath('source', TYPES_URN))),
destination=self._to_firewall_address(
element.find(fixxpath('destination', TYPES_URN))),
location=location,
status=findtext(element, 'state', TYPES_URN))
def _to_firewall_address(self, element):
ip = element.find(fixxpath('ip', TYPES_URN))
port = element.find(fixxpath('port', TYPES_URN))
port_list = element.find(fixxpath('portList', TYPES_URN))
address_list = element.find(fixxpath('ipAddressList', TYPES_URN))
if address_list is None:
return DimensionDataFirewallAddress(
any_ip=ip.get('address') == 'ANY',
ip_address=ip.get('address'),
ip_prefix_size=ip.get('prefixSize'),
port_begin=port.get('begin') if port is not None else None,
port_end=port.get('end') if port is not None else None,
port_list_id=port_list.get('id', None)
if port_list is not None else None,
address_list_id=address_list.get('id')
if address_list is not None else None)
else:
return DimensionDataFirewallAddress(
any_ip=False,
ip_address=None,
ip_prefix_size=None,
port_begin=None,
port_end=None,
port_list_id=port_list.get('id', None)
if port_list is not None else None,
address_list_id=address_list.get('id')
if address_list is not None else None)
def _to_ip_blocks(self, object):
blocks = []
locations = self.list_locations()
for element in findall(object, 'publicIpBlock', TYPES_URN):
blocks.append(self._to_ip_block(element, locations))
return blocks
def _to_ip_block(self, element, locations):
location_id = element.get('datacenterId')
location = list(filter(lambda x: x.id == location_id,
locations))[0]
return DimensionDataPublicIpBlock(
id=element.get('id'),
network_domain=self.ex_get_network_domain(
findtext(element, 'networkDomainId', TYPES_URN)
),
base_ip=findtext(element, 'baseIp', TYPES_URN),
size=findtext(element, 'size', TYPES_URN),
location=location,
status=findtext(element, 'state', TYPES_URN))
def _to_networks(self, object):
networks = []
locations = self.list_locations()
for element in findall(object, 'network', NETWORK_NS):
networks.append(self._to_network(element, locations))
return networks
def _to_network(self, element, locations):
multicast = False
if findtext(element, 'multicast', NETWORK_NS) == 'true':
multicast = True
status = self._to_status(element.find(fixxpath('status', NETWORK_NS)))
location_id = findtext(element, 'location', NETWORK_NS)
location = list(filter(lambda x: x.id == location_id,
locations))[0]
return DimensionDataNetwork(
id=findtext(element, 'id', NETWORK_NS),
name=findtext(element, 'name', NETWORK_NS),
description=findtext(element, 'description',
NETWORK_NS),
location=location,
private_net=findtext(element, 'privateNet',
NETWORK_NS),
multicast=multicast,
status=status)
def _to_network_domains(self, object):
network_domains = []
locations = self.list_locations()
for element in findall(object, 'networkDomain', TYPES_URN):
network_domains.append(self._to_network_domain(element, locations))
return network_domains
def _to_network_domain(self, element, locations):
location_id = element.get('datacenterId')
location = list(filter(lambda x: x.id == location_id,
locations))[0]
plan = findtext(element, 'type', TYPES_URN)
if plan is 'ESSENTIALS':
plan_type = NetworkDomainServicePlan.ESSENTIALS
else:
plan_type = NetworkDomainServicePlan.ADVANCED
return DimensionDataNetworkDomain(
id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
description=findtext(element, 'description', TYPES_URN),
plan=plan_type,
location=location,
status=findtext(element, 'state', TYPES_URN))
def _to_vlans(self, object):
vlans = []
locations = self.list_locations()
for element in findall(object, 'vlan', TYPES_URN):
vlans.append(self._to_vlan(element, locations=locations))
return vlans
def _to_vlan(self, element, locations):
location_id = element.get('datacenterId')
location = list(filter(lambda x: x.id == location_id,
locations))[0]
ip_range = element.find(fixxpath('privateIpv4Range', TYPES_URN))
ip6_range = element.find(fixxpath('ipv6Range', TYPES_URN))
network_domain_el = element.find(
fixxpath('networkDomain', TYPES_URN))
network_domain = self.ex_get_network_domain(
network_domain_el.get('id'))
return DimensionDataVlan(
id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
description=findtext(element, 'description',
TYPES_URN),
network_domain=network_domain,
private_ipv4_range_address=ip_range.get('address'),
private_ipv4_range_size=int(ip_range.get('prefixSize')),
ipv6_range_address=ip6_range.get('address'),
ipv6_range_size=int(ip6_range.get('prefixSize')),
ipv4_gateway=findtext(
element,
'ipv4GatewayAddress',
TYPES_URN),
ipv6_gateway=findtext(
element,
'ipv6GatewayAddress',
TYPES_URN),
location=location,
status=findtext(element, 'state', TYPES_URN))
def _to_locations(self, object):
locations = []
for element in object.findall(fixxpath('datacenter', TYPES_URN)):
locations.append(self._to_location(element))
return locations
def _to_location(self, element):
l = NodeLocation(id=element.get('id'),
name=findtext(element, 'displayName', TYPES_URN),
country=findtext(element, 'country', TYPES_URN),
driver=self)
return l
def _to_cpu_spec(self, element):
return DimensionDataServerCpuSpecification(
cpu_count=int(element.get('count')),
cores_per_socket=int(element.get('coresPerSocket')),
performance=element.get('speed'))
def _to_vmware_tools(self, element):
status = None
if hasattr(element, 'runningStatus'):
status = element.get('runningStatus')
version_status = None
if hasattr(element, 'version_status'):
version_status = element.get('version_status')
api_version = None
if hasattr(element, 'apiVersion'):
api_version = element.get('apiVersion')
return DimensionDataServerVMWareTools(
status=status,
version_status=version_status,
api_version=api_version)
def _to_disks(self, object):
disk_elements = object.findall(fixxpath('disk', TYPES_URN))
return [self._to_disk(el) for el in disk_elements]
def _to_disk(self, element):
return DimensionDataServerDisk(
id=element.get('id'),
scsi_id=int(element.get('scsiId')),
size_gb=int(element.get('sizeGb')),
speed=element.get('speed'),
state=element.get('state')
)
def _to_nodes(self, object):
node_elements = object.findall(fixxpath('server', TYPES_URN))
return [self._to_node(el) for el in node_elements]
def _to_node(self, element):
started = findtext(element, 'started', TYPES_URN)
status = self._to_status(element.find(fixxpath('progress', TYPES_URN)))
dd_state = findtext(element, 'state', TYPES_URN)
node_state = self._get_node_state(dd_state, started, status.action)
has_network_info \
= element.find(fixxpath('networkInfo', TYPES_URN)) is not None
cpu_spec = self._to_cpu_spec(element.find(fixxpath('cpu', TYPES_URN)))
disks = self._to_disks(element)
# Vmware Tools
# Version 2.3 or earlier
if LooseVersion(self.connection.active_api_version) < LooseVersion(
'2.4'):
vmware_tools = self._to_vmware_tools(
element.find(fixxpath('vmwareTools', TYPES_URN)))
operation_system = element.find(fixxpath(
'operatingSystem', TYPES_URN))
# Version 2.4 or later
else:
vmtools_elm = fixxpath('guest/vmTools', TYPES_URN)
if vmtools_elm is not None:
vmware_tools = self._to_vmware_tools(vmtools_elm)
operation_system = element.find(fixxpath(
'guest/operatingSystem', TYPES_URN))
extra = {
'description': findtext(element, 'description', TYPES_URN),
'sourceImageId': findtext(element, 'sourceImageId', TYPES_URN),
'networkId': findtext(element, 'networkId', TYPES_URN),
'networkDomainId':
element.find(fixxpath('networkInfo', TYPES_URN))
.get('networkDomainId')
if has_network_info else None,
'datacenterId': element.get('datacenterId'),
'deployedTime': findtext(element, 'createTime', TYPES_URN),
'cpu': cpu_spec,
'memoryMb': int(findtext(
element,
'memoryGb',
TYPES_URN)) * 1024,
'OS_id': operation_system.get('id'),
'OS_type': operation_system.get('family'),
'OS_displayName': operation_system.get('displayName'),
'status': status,
'disks': disks,
'vmWareTools': vmware_tools
}
public_ip = findtext(element, 'publicIpAddress', TYPES_URN)
private_ip = element.find(
fixxpath('networkInfo/primaryNic', TYPES_URN)) \
.get('privateIpv4') \
if has_network_info else \
element.find(fixxpath('nic', TYPES_URN)).get('privateIpv4')
extra['ipv6'] = element.find(
fixxpath('networkInfo/primaryNic', TYPES_URN)) \
.get('ipv6') \
if has_network_info else \
element.find(fixxpath('nic', TYPES_URN)).get('ipv6')
n = Node(id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
state=node_state,
public_ips=[public_ip] if public_ip is not None else [],
private_ips=[private_ip] if private_ip is not None else [],
size=self.list_sizes()[0],
image=NodeImage(extra['sourceImageId'],
extra['OS_displayName'],
self.connection.driver),
driver=self.connection.driver,
extra=extra)
return n
def _to_status(self, element):
if element is None:
return DimensionDataStatus()
s = DimensionDataStatus(action=findtext(element, 'action', TYPES_URN),
request_time=findtext(
element,
'requestTime',
TYPES_URN),
user_name=findtext(
element,
'userName',
TYPES_URN),
number_of_steps=findtext(
element,
'numberOfSteps',
TYPES_URN),
step_name=findtext(
element,
'step/name',
TYPES_URN),
step_number=findtext(
element,
'step_number',
TYPES_URN),
step_percent_complete=findtext(
element,
'step/percentComplete',
TYPES_URN),
failure_reason=findtext(
element,
'failureReason',
TYPES_URN))
return s
def _to_ip_address_lists(self, object):
ip_address_lists = []
for element in findall(object, 'ipAddressList', TYPES_URN):
ip_address_lists.append(self._to_ip_address_list(element))
return ip_address_lists
def _to_ip_address_list(self, element):
ipAddresses = []
for ip in findall(element, 'ipAddress', TYPES_URN):
ipAddresses.append(self._to_ip_address(ip))
child_ip_address_lists = []
for child_ip_list in findall(element, 'childIpAddressList',
TYPES_URN):
child_ip_address_lists.append(self
._to_child_ip_list(child_ip_list))
return DimensionDataIpAddressList(
id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
description=findtext(element, 'description', TYPES_URN),
ip_version=findtext(element, 'ipVersion', TYPES_URN),
ip_address_collection=ipAddresses,
state=findtext(element, 'state', TYPES_URN),
create_time=findtext(element, 'createTime', TYPES_URN),
child_ip_address_lists=child_ip_address_lists
)
def _to_child_ip_list(self, element):
return DimensionDataChildIpAddressList(
id=element.get('id'),
name=element.get('name')
)
def _to_ip_address(self, element):
return DimensionDataIpAddress(
begin=element.get('begin'),
end=element.get('end'),
prefix_size=element.get('prefixSize')
)
def _to_port_lists(self, object):
port_lists = []
for element in findall(object, 'portList', TYPES_URN):
port_lists.append(self._to_port_list(element))
return port_lists
def _to_port_list(self, element):
ports = []
for port in findall(element, 'port', TYPES_URN):
ports.append(self._to_port(element=port))
child_portlist_list = []
for child in findall(element, 'childPortList', TYPES_URN):
child_portlist_list.append(
self._to_child_port_list(element=child))
return DimensionDataPortList(
id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
description=findtext(element, 'description', TYPES_URN),
port_collection=ports,
child_portlist_list=child_portlist_list,
state=findtext(element, 'state', TYPES_URN),
create_time=findtext(element, 'createTime', TYPES_URN)
)
def _image_needs_auth(self, image):
if not isinstance(image, NodeImage):
image = self.ex_get_image_by_id(image)
if image.extra['isCustomerImage'] and image.extra['OS_type'] == 'UNIX':
return False
return True
@staticmethod
def _to_port(element):
return DimensionDataPort(
begin=element.get('begin'),
end=element.get('end')
)
@staticmethod
def _to_child_port_list(element):
return DimensionDataChildPortList(
id=element.get('id'),
name=element.get('name')
)
@staticmethod
def _get_node_state(state, started, action):
try:
return NODE_STATE_MAP[(state, started, action)]
except KeyError:
if started == 'true':
return NodeState.RUNNING
else:
return NodeState.TERMINATED
    @staticmethod
    def _node_to_node_id(node):
        """Return the id of ``node``; accepts a :class:`Node` or an id."""
        return dd_object_to_id(node, Node)
    @staticmethod
    def _location_to_location_id(location):
        """Return the id of ``location``; accepts a :class:`NodeLocation`
        or an id."""
        return dd_object_to_id(location, NodeLocation)
    @staticmethod
    def _vlan_to_vlan_id(vlan):
        """Return the id of ``vlan``; accepts a
        :class:`DimensionDataVlan` or an id."""
        return dd_object_to_id(vlan, DimensionDataVlan)
    @staticmethod
    def _image_to_image_id(image):
        """Return the id of ``image``; accepts a :class:`NodeImage` or
        an id."""
        return dd_object_to_id(image, NodeImage)
    @staticmethod
    def _network_to_network_id(network):
        """Return the id of ``network``; accepts a
        :class:`DimensionDataNetwork` or an id."""
        return dd_object_to_id(network, DimensionDataNetwork)
    @staticmethod
    def _anti_affinity_rule_to_anti_affinity_rule_id(rule):
        """Return the id of ``rule``; accepts a
        :class:`DimensionDataAntiAffinityRule` or an id."""
        return dd_object_to_id(rule, DimensionDataAntiAffinityRule)
    @staticmethod
    def _network_domain_to_network_domain_id(network_domain):
        """Return the id of ``network_domain``; accepts a
        :class:`DimensionDataNetworkDomain` or an id."""
        return dd_object_to_id(network_domain, DimensionDataNetworkDomain)
    @staticmethod
    def _tag_key_to_tag_key_id(tag_key):
        """Return the id of ``tag_key``; accepts a
        :class:`DimensionDataTagKey` or an id."""
        return dd_object_to_id(tag_key, DimensionDataTagKey)
    @staticmethod
    def _tag_key_to_tag_key_name(tag_key):
        """Return the *name* of ``tag_key``; accepts a
        :class:`DimensionDataTagKey` or a name string."""
        return dd_object_to_id(tag_key, DimensionDataTagKey, id_value='name')
    @staticmethod
    def _ip_address_list_to_ip_address_list_id(ip_addr_list):
        """Return the id of ``ip_addr_list``; accepts a
        :class:`DimensionDataIpAddressList` or an id."""
        return dd_object_to_id(ip_addr_list, DimensionDataIpAddressList,
                               id_value='id')
    @staticmethod
    def _child_ip_address_list_to_child_ip_address_list_id(child_ip_addr_list):
        """Return the id of ``child_ip_addr_list``; accepts a
        :class:`DimensionDataChildIpAddressList` or an id."""
        return dd_object_to_id(child_ip_addr_list,
                               DimensionDataChildIpAddressList,
                               id_value='id')
    @staticmethod
    def _port_list_to_port_list_id(port_list):
        """Return the id of ``port_list``; accepts a
        :class:`DimensionDataPortList` or an id."""
        return dd_object_to_id(port_list, DimensionDataPortList,
                               id_value='id')
    @staticmethod
    def _child_port_list_to_child_port_list_id(child_port_list):
        """Return the id of ``child_port_list``; accepts a
        :class:`DimensionDataChildPortList` or an id."""
        return dd_object_to_id(child_port_list,
                               DimensionDataChildPortList,
                               id_value='id')
@staticmethod
def _str2bool(string):
    """Return True iff *string* is (case-insensitively) 'true'.

    BUGFIX: the previous ``string.lower() in ("true")`` tested substring
    membership in the *string* ``"true"`` — ``("true")`` is not a tuple —
    so inputs like 'r', 'ue' or 't' wrongly mapped to True.
    """
    return string.lower() == 'true'
| apache-2.0 |
abomyi/django | django/http/__init__.py | 98 | 1186 | from django.http.cookie import SimpleCookie, parse_cookie
from django.http.request import (HttpRequest, QueryDict,
RawPostDataException, UnreadablePostError, build_request_repr)
from django.http.response import (
HttpResponse, StreamingHttpResponse, FileResponse,
HttpResponseRedirect, HttpResponsePermanentRedirect,
HttpResponseNotModified, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseNotFound, HttpResponseNotAllowed, HttpResponseGone,
HttpResponseServerError, Http404, BadHeaderError, JsonResponse,
)
from django.http.utils import fix_location_header, conditional_content_removal
__all__ = [
'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict',
'RawPostDataException', 'UnreadablePostError', 'build_request_repr',
'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect',
'HttpResponsePermanentRedirect', 'HttpResponseNotModified',
'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound',
'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError',
'Http404', 'BadHeaderError', 'fix_location_header', 'JsonResponse',
'FileResponse', 'conditional_content_removal',
]
| bsd-3-clause |
koomik/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tf1.py | 16 | 1451 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TF1IE(InfoExtractor):
    """TF1 uses the wat.tv player."""
    _VALID_URL = r'http://videos\.tf1\.fr/.*-(?P<id>.*?)\.html'
    _TEST = {
        'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html',
        'info_dict': {
            'id': '10635995',
            'ext': 'mp4',
            'title': 'Citroën Grand C4 Picasso 2013 : présentation officielle',
            'description': 'Vidéo officielle du nouveau Citroën Grand C4 Picasso, lancé à l\'automne 2013.',
        },
        'params': {
            # Sometimes wat serves the whole file with the --test option
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        """Extract the wat.tv media URL from a TF1 page and delegate.

        The TF1 page only embeds a wat.tv player: fetch the embed frame,
        pull the wat video id (UVID) out of it, resolve the media URL via
        wat's content API, then hand off to the 'Wat' extractor.
        """
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        embed_url = self._html_search_regex(
            r'"(https://www.wat.tv/embedframe/.*?)"', webpage, 'embed url')
        embed_page = self._download_webpage(embed_url, video_id,
                                            'Downloading embed player page')
        wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
        wat_info = self._download_json(
            'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id)
        return self.url_result(wat_info['media']['url'], 'Wat')
| gpl-3.0 |
shaileshgoogler/pyglet | contrib/toys/euclid.py | 44 | 62436 | #!/usr/bin/env python
#
# euclid graphics maths module
#
# Copyright (c) 2006 Alex Holkner
# Alex.Holkner@mail.google.com
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''euclid graphics maths module
Documentation and tests are included in the file "euclid.txt", or online
at http://code.google.com/p/pyeuclid
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
__revision__ = '$Revision$'
import math
import operator
import types
# Some magic here.  If _use_slots is True, the classes will derive from
# object and will define a __slots__ class variable.  If _use_slots is
# False, classes will be old-style and will not define __slots__.
#
# _use_slots = True:   Memory efficient, probably faster in future versions
#                      of Python, "better".
# _use_slots = False:  Ordinary classes, much faster than slots in current
#                      versions of Python (2.4 and 2.5).
_use_slots = True

# If True, allows components of Vector2 and Vector3 to be set via swizzling;
# e.g. v.xyz = (1, 2, 3).  This is much, much slower than the more verbose
# v.x = 1; v.y = 2; v.z = 3, and slows down ordinary element setting as
# well.  Recommended setting is False.
_enable_swizzle_set = False

# Swizzle-set needs __setattr__, which requires new-style (object-derived)
# classes.
if _enable_swizzle_set:
    _use_slots = True

# Implement _use_slots magic.
class _EuclidMetaclass(type):
    # Applied module-wide via the Python-2 ``__metaclass__`` hook below.
    def __new__(cls, name, bases, dct):
        if '__slots__' in dct:
            # __slots__ classes have no __dict__, so inject explicit
            # pickle support derived from the slot list.
            dct['__getstate__'] = cls._create_getstate(dct['__slots__'])
            dct['__setstate__'] = cls._create_setstate(dct['__slots__'])
        if _use_slots:
            return type.__new__(cls, name, bases + (object,), dct)
        else:
            if '__slots__' in dct:
                del dct['__slots__']
            # Python-2-only path: rebuild as an old-style class.
            return types.ClassType.__new__(types.ClassType, name, bases, dct)

    @classmethod
    def _create_getstate(cls, slots):
        # Factory: returns a __getstate__ that snapshots every slot.
        def __getstate__(self):
            d = {}
            for slot in slots:
                d[slot] = getattr(self, slot)
            return d
        return __getstate__

    @classmethod
    def _create_setstate(cls, slots):
        # Factory: returns a __setstate__ that restores the snapshot.
        def __setstate__(self, state):
            for name, value in state.items():
                setattr(self, name, value)
        return __setstate__

__metaclass__ = _EuclidMetaclass
class Vector2:
    """Mutable 2D vector interoperable with any length-2 sequence, with
    swizzle-style attribute reads (e.g. ``v.yx``) and the usual geometric
    helpers (magnitude, normalize, dot, reflect).

    Fixes in this revision:
      * ``__rsub__`` read ``.x``/``.y`` off a plain sequence operand and
        crashed; it now subscripts the sequence.
      * ``raise AttributeError(name)`` replaces the Python-2-only comma
        form (identical behaviour on Python 2, also valid on Python 3).
    """
    __slots__ = ['x', 'y']

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __copy__(self):
        return self.__class__(self.x, self.y)

    copy = __copy__

    def __repr__(self):
        return 'Vector2(%.2f, %.2f)' % (self.x, self.y)

    def __eq__(self, other):
        # Compares against another Vector2 or any length-2 sequence.
        if isinstance(other, Vector2):
            return self.x == other.x and \
                   self.y == other.y
        else:
            assert hasattr(other, '__len__') and len(other) == 2
            return self.x == other[0] and \
                   self.y == other[1]

    def __ne__(self, other):
        return not self.__eq__(other)

    def __nonzero__(self):
        # Python 2 truth protocol: non-zero iff any component is non-zero.
        return self.x != 0 or self.y != 0

    def __len__(self):
        return 2

    def __getitem__(self, key):
        return (self.x, self.y)[key]

    def __setitem__(self, key, value):
        l = [self.x, self.y]
        l[key] = value
        self.x, self.y = l

    def __iter__(self):
        return iter((self.x, self.y))

    def __getattr__(self, name):
        # Swizzle read: any string of 'x'/'y' chars -> tuple of components.
        try:
            return tuple([(self.x, self.y)['xy'.index(c)] \
                          for c in name])
        except ValueError:
            raise AttributeError(name)

    if _enable_swizzle_set:
        # This has detrimental performance on ordinary setattr as well
        # if enabled.
        def __setattr__(self, name, value):
            if len(name) == 1:
                object.__setattr__(self, name, value)
            else:
                try:
                    l = [self.x, self.y]
                    for c, v in map(None, name, value):
                        l['xy'.index(c)] = v
                    self.x, self.y = l
                except ValueError:
                    raise AttributeError(name)

    def __add__(self, other):
        if isinstance(other, Vector2):
            return Vector2(self.x + other.x,
                           self.y + other.y)
        else:
            assert hasattr(other, '__len__') and len(other) == 2
            return Vector2(self.x + other[0],
                           self.y + other[1])

    __radd__ = __add__

    def __iadd__(self, other):
        if isinstance(other, Vector2):
            self.x += other.x
            self.y += other.y
        else:
            self.x += other[0]
            self.y += other[1]
        return self

    def __sub__(self, other):
        if isinstance(other, Vector2):
            return Vector2(self.x - other.x,
                           self.y - other.y)
        else:
            assert hasattr(other, '__len__') and len(other) == 2
            return Vector2(self.x - other[0],
                           self.y - other[1])

    def __rsub__(self, other):
        if isinstance(other, Vector2):
            return Vector2(other.x - self.x,
                           other.y - self.y)
        else:
            assert hasattr(other, '__len__') and len(other) == 2
            # BUGFIX: 'other' is a plain sequence in this branch; the old
            # code read other.x/other.y and raised AttributeError.
            return Vector2(other[0] - self.x,
                           other[1] - self.y)

    def __mul__(self, other):
        # NOTE: 'long' keeps Python 2 semantics; it is undefined on Python 3.
        assert type(other) in (int, long, float)
        return Vector2(self.x * other,
                       self.y * other)

    __rmul__ = __mul__

    def __imul__(self, other):
        assert type(other) in (int, long, float)
        self.x *= other
        self.y *= other
        return self

    def __div__(self, other):
        assert type(other) in (int, long, float)
        return Vector2(operator.div(self.x, other),
                       operator.div(self.y, other))

    def __rdiv__(self, other):
        assert type(other) in (int, long, float)
        return Vector2(operator.div(other, self.x),
                       operator.div(other, self.y))

    def __floordiv__(self, other):
        assert type(other) in (int, long, float)
        return Vector2(operator.floordiv(self.x, other),
                       operator.floordiv(self.y, other))

    def __rfloordiv__(self, other):
        assert type(other) in (int, long, float)
        return Vector2(operator.floordiv(other, self.x),
                       operator.floordiv(other, self.y))

    def __truediv__(self, other):
        assert type(other) in (int, long, float)
        return Vector2(operator.truediv(self.x, other),
                       operator.truediv(self.y, other))

    def __rtruediv__(self, other):
        assert type(other) in (int, long, float)
        return Vector2(operator.truediv(other, self.x),
                       operator.truediv(other, self.y))

    def __neg__(self):
        return Vector2(-self.x,
                       -self.y)

    __pos__ = __copy__

    def __abs__(self):
        return math.sqrt(self.x ** 2 + \
                         self.y ** 2)

    magnitude = __abs__

    def magnitude_squared(self):
        return self.x ** 2 + \
               self.y ** 2

    def normalize(self):
        """Normalize in place; returns self (no-op for zero vectors)."""
        d = self.magnitude()
        if d:
            self.x /= d
            self.y /= d
        return self

    def normalized(self):
        """Return a unit-length copy (plain copy for zero vectors)."""
        d = self.magnitude()
        if d:
            return Vector2(self.x / d,
                           self.y / d)
        return self.copy()

    def dot(self, other):
        assert isinstance(other, Vector2)
        return self.x * other.x + \
               self.y * other.y

    def cross(self):
        # 2D analogue of the cross product: the clockwise perpendicular.
        return Vector2(self.y, -self.x)

    def reflect(self, normal):
        """Reflect about *normal* (assumed unit length)."""
        # assume normal is normalized
        assert isinstance(normal, Vector2)
        d = 2 * (self.x * normal.x + self.y * normal.y)
        return Vector2(self.x - d * normal.x,
                       self.y - d * normal.y)
class Vector3:
    """Mutable 3D vector interoperable with any length-3 sequence, with
    swizzle-style attribute reads and Point3-aware arithmetic.

    Fixes in this revision:
      * ``__sub__`` computed the correct result class (Point3 vs Vector3)
        and then ignored it, always returning a Vector3; it now honours
        the Vector/Point algebra described in the comments.
      * ``__rsub__`` read ``.x``/``.y``/``.z`` off a plain sequence
        operand and crashed; it now subscripts the sequence.
      * ``raise AttributeError(name)`` replaces the Python-2-only comma
        form (identical behaviour on Python 2, also valid on Python 3).
    """
    __slots__ = ['x', 'y', 'z']

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __copy__(self):
        return self.__class__(self.x, self.y, self.z)

    copy = __copy__

    def __repr__(self):
        return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
                                              self.y,
                                              self.z)

    def __eq__(self, other):
        # Compares against another Vector3 or any length-3 sequence.
        if isinstance(other, Vector3):
            return self.x == other.x and \
                   self.y == other.y and \
                   self.z == other.z
        else:
            assert hasattr(other, '__len__') and len(other) == 3
            return self.x == other[0] and \
                   self.y == other[1] and \
                   self.z == other[2]

    def __ne__(self, other):
        return not self.__eq__(other)

    def __nonzero__(self):
        # Python 2 truth protocol: non-zero iff any component is non-zero.
        return self.x != 0 or self.y != 0 or self.z != 0

    def __len__(self):
        return 3

    def __getitem__(self, key):
        return (self.x, self.y, self.z)[key]

    def __setitem__(self, key, value):
        l = [self.x, self.y, self.z]
        l[key] = value
        self.x, self.y, self.z = l

    def __iter__(self):
        return iter((self.x, self.y, self.z))

    def __getattr__(self, name):
        # Swizzle read: any string of 'x'/'y'/'z' chars -> component tuple.
        try:
            return tuple([(self.x, self.y, self.z)['xyz'.index(c)] \
                          for c in name])
        except ValueError:
            raise AttributeError(name)

    if _enable_swizzle_set:
        # This has detrimental performance on ordinary setattr as well
        # if enabled.
        def __setattr__(self, name, value):
            if len(name) == 1:
                object.__setattr__(self, name, value)
            else:
                try:
                    l = [self.x, self.y, self.z]
                    for c, v in map(None, name, value):
                        l['xyz'.index(c)] = v
                    self.x, self.y, self.z = l
                except ValueError:
                    raise AttributeError(name)

    def __add__(self, other):
        if isinstance(other, Vector3):
            # Vector + Vector -> Vector
            # Vector + Point -> Point
            # Point + Point -> Vector
            if self.__class__ is other.__class__:
                _class = Vector3
            else:
                _class = Point3
            return _class(self.x + other.x,
                          self.y + other.y,
                          self.z + other.z)
        else:
            assert hasattr(other, '__len__') and len(other) == 3
            return Vector3(self.x + other[0],
                           self.y + other[1],
                           self.z + other[2])

    __radd__ = __add__

    def __iadd__(self, other):
        if isinstance(other, Vector3):
            self.x += other.x
            self.y += other.y
            self.z += other.z
        else:
            self.x += other[0]
            self.y += other[1]
            self.z += other[2]
        return self

    def __sub__(self, other):
        if isinstance(other, Vector3):
            # Vector - Vector -> Vector
            # Vector - Point -> Point
            # Point - Point -> Vector
            if self.__class__ is other.__class__:
                _class = Vector3
            else:
                _class = Point3
            # BUGFIX: previously returned Vector3(...) regardless of the
            # _class computed above, losing Point3 results.
            return _class(self.x - other.x,
                          self.y - other.y,
                          self.z - other.z)
        else:
            assert hasattr(other, '__len__') and len(other) == 3
            return Vector3(self.x - other[0],
                           self.y - other[1],
                           self.z - other[2])

    def __rsub__(self, other):
        if isinstance(other, Vector3):
            return Vector3(other.x - self.x,
                           other.y - self.y,
                           other.z - self.z)
        else:
            assert hasattr(other, '__len__') and len(other) == 3
            # BUGFIX: 'other' is a plain sequence in this branch; the old
            # code read other.x/other.y/other.z and raised AttributeError.
            return Vector3(other[0] - self.x,
                           other[1] - self.y,
                           other[2] - self.z)

    def __mul__(self, other):
        if isinstance(other, Vector3):
            # TODO component-wise mul/div in-place and on Vector2; docs.
            if self.__class__ is Point3 or other.__class__ is Point3:
                _class = Point3
            else:
                _class = Vector3
            return _class(self.x * other.x,
                          self.y * other.y,
                          self.z * other.z)
        else:
            # NOTE: 'long' keeps Python 2 semantics; undefined on Python 3.
            assert type(other) in (int, long, float)
            return Vector3(self.x * other,
                           self.y * other,
                           self.z * other)

    __rmul__ = __mul__

    def __imul__(self, other):
        assert type(other) in (int, long, float)
        self.x *= other
        self.y *= other
        self.z *= other
        return self

    def __div__(self, other):
        assert type(other) in (int, long, float)
        return Vector3(operator.div(self.x, other),
                       operator.div(self.y, other),
                       operator.div(self.z, other))

    def __rdiv__(self, other):
        assert type(other) in (int, long, float)
        return Vector3(operator.div(other, self.x),
                       operator.div(other, self.y),
                       operator.div(other, self.z))

    def __floordiv__(self, other):
        assert type(other) in (int, long, float)
        return Vector3(operator.floordiv(self.x, other),
                       operator.floordiv(self.y, other),
                       operator.floordiv(self.z, other))

    def __rfloordiv__(self, other):
        assert type(other) in (int, long, float)
        return Vector3(operator.floordiv(other, self.x),
                       operator.floordiv(other, self.y),
                       operator.floordiv(other, self.z))

    def __truediv__(self, other):
        assert type(other) in (int, long, float)
        return Vector3(operator.truediv(self.x, other),
                       operator.truediv(self.y, other),
                       operator.truediv(self.z, other))

    def __rtruediv__(self, other):
        assert type(other) in (int, long, float)
        return Vector3(operator.truediv(other, self.x),
                       operator.truediv(other, self.y),
                       operator.truediv(other, self.z))

    def __neg__(self):
        return Vector3(-self.x,
                       -self.y,
                       -self.z)

    __pos__ = __copy__

    def __abs__(self):
        return math.sqrt(self.x ** 2 + \
                         self.y ** 2 + \
                         self.z ** 2)

    magnitude = __abs__

    def magnitude_squared(self):
        return self.x ** 2 + \
               self.y ** 2 + \
               self.z ** 2

    def normalize(self):
        """Normalize in place; returns self (no-op for zero vectors)."""
        d = self.magnitude()
        if d:
            self.x /= d
            self.y /= d
            self.z /= d
        return self

    def normalized(self):
        """Return a unit-length copy (plain copy for zero vectors)."""
        d = self.magnitude()
        if d:
            return Vector3(self.x / d,
                           self.y / d,
                           self.z / d)
        return self.copy()

    def dot(self, other):
        assert isinstance(other, Vector3)
        return self.x * other.x + \
               self.y * other.y + \
               self.z * other.z

    def cross(self, other):
        assert isinstance(other, Vector3)
        return Vector3(self.y * other.z - self.z * other.y,
                       -self.x * other.z + self.z * other.x,
                       self.x * other.y - self.y * other.x)

    def reflect(self, normal):
        """Reflect about *normal* (assumed unit length)."""
        # assume normal is normalized
        assert isinstance(normal, Vector3)
        d = 2 * (self.x * normal.x + self.y * normal.y + self.z * normal.z)
        return Vector3(self.x - d * normal.x,
                       self.y - d * normal.y,
                       self.z - d * normal.z)
class AffineVector3(Vector3):
    """Vector3 variant that exposes an implicit homogeneous w == 1, so
    Matrix4 multiplication applies the translation column to it (a plain
    Vector3 is transformed without translation).
    """
    # Implicit fourth (homogeneous) component; a class attribute, never
    # stored per instance.
    w = 1

    def __repr__(self):
        return 'Vector3(%.2f, %.2f, %.2f, 1.00)' % (self.x,
                                                    self.y,
                                                    self.z)

    def __len__(self):
        return 4

    def __getitem__(self, key):
        return (self.x, self.y, self.z, 1)[key]

    def __iter__(self):
        return iter((self.x, self.y, self.z, 1))
# a b c
# e f g
# i j k
class Matrix3:
    """3x3 matrix for 2D affine transforms (scale/translate/rotate).

    Attributes a..k hold the elements row-major (layout above); [] access
    flattens them column-major (OpenGL order).  The mutating methods
    (identity/scale/translate/rotate) return self for chaining.
    """
    __slots__ = list('abcefgijk')

    def __init__(self):
        # Start as the identity transform.
        self.identity()

    def __copy__(self):
        M = Matrix3()
        M.a = self.a
        M.b = self.b
        M.c = self.c
        M.e = self.e
        M.f = self.f
        M.g = self.g
        M.i = self.i
        M.j = self.j
        M.k = self.k
        return M

    copy = __copy__

    def __repr__(self):
        return ('Matrix3([% 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f])') \
            % (self.a, self.b, self.c,
               self.e, self.f, self.g,
               self.i, self.j, self.k)

    def __getitem__(self, key):
        # Column-major flattening — hence the a,e,i, b,f,j, c,g,k order.
        return [self.a, self.e, self.i,
                self.b, self.f, self.j,
                self.c, self.g, self.k][key]

    def __setitem__(self, key, value):
        L = self[:]
        L[key] = value
        (self.a, self.e, self.i,
         self.b, self.f, self.j,
         self.c, self.g, self.k) = L

    def __mul__(self, other):
        if isinstance(other, Matrix3):
            # Caching repeatedly accessed attributes in local variables
            # apparently increases performance by 20%.  Attrib: Will McGugan.
            Aa = self.a
            Ab = self.b
            Ac = self.c
            Ae = self.e
            Af = self.f
            Ag = self.g
            Ai = self.i
            Aj = self.j
            Ak = self.k
            Ba = other.a
            Bb = other.b
            Bc = other.c
            Be = other.e
            Bf = other.f
            Bg = other.g
            Bi = other.i
            Bj = other.j
            Bk = other.k
            C = Matrix3()
            C.a = Aa * Ba + Ab * Be + Ac * Bi
            C.b = Aa * Bb + Ab * Bf + Ac * Bj
            C.c = Aa * Bc + Ab * Bg + Ac * Bk
            C.e = Ae * Ba + Af * Be + Ag * Bi
            C.f = Ae * Bb + Af * Bf + Ag * Bj
            C.g = Ae * Bc + Af * Bg + Ag * Bk
            C.i = Ai * Ba + Aj * Be + Ak * Bi
            C.j = Ai * Bb + Aj * Bf + Ak * Bj
            C.k = Ai * Bc + Aj * Bg + Ak * Bk
            return C
        elif isinstance(other, Point2):
            # Point: full affine transform (the +A.c/+A.g translation
            # terms apply).
            A = self
            B = other
            P = Point2(0, 0)
            P.x = A.a * B.x + A.b * B.y + A.c
            P.y = A.e * B.x + A.f * B.y + A.g
            return P
        elif isinstance(other, Vector2):
            # Vector: linear part only, no translation.
            A = self
            B = other
            V = Vector2(0, 0)
            V.x = A.a * B.x + A.b * B.y
            V.y = A.e * B.x + A.f * B.y
            return V
        else:
            # Other geometry: transform a copy via its own hook.
            other = other.copy()
            other._apply_transform(self)
            return other

    def __imul__(self, other):
        assert isinstance(other, Matrix3)
        # Cache attributes in local vars (see Matrix3.__mul__).
        Aa = self.a
        Ab = self.b
        Ac = self.c
        Ae = self.e
        Af = self.f
        Ag = self.g
        Ai = self.i
        Aj = self.j
        Ak = self.k
        Ba = other.a
        Bb = other.b
        Bc = other.c
        Be = other.e
        Bf = other.f
        Bg = other.g
        Bi = other.i
        Bj = other.j
        Bk = other.k
        self.a = Aa * Ba + Ab * Be + Ac * Bi
        self.b = Aa * Bb + Ab * Bf + Ac * Bj
        self.c = Aa * Bc + Ab * Bg + Ac * Bk
        self.e = Ae * Ba + Af * Be + Ag * Bi
        self.f = Ae * Bb + Af * Bf + Ag * Bj
        self.g = Ae * Bc + Af * Bg + Ag * Bk
        self.i = Ai * Ba + Aj * Be + Ak * Bi
        self.j = Ai * Bb + Aj * Bf + Ak * Bj
        self.k = Ai * Bc + Aj * Bg + Ak * Bk
        return self

    def identity(self):
        """Reset to the identity matrix; returns self."""
        self.a = self.f = self.k = 1.
        self.b = self.c = self.e = self.g = self.i = self.j = 0
        return self

    def scale(self, x, y):
        self *= Matrix3.new_scale(x, y)
        return self

    def translate(self, x, y):
        self *= Matrix3.new_translate(x, y)
        return self

    def rotate(self, angle):
        # angle in radians, counter-clockwise (see new_rotate).
        self *= Matrix3.new_rotate(angle)
        return self

    # Static constructors
    def new_identity(cls):
        self = cls()
        return self
    new_identity = classmethod(new_identity)

    def new_scale(cls, x, y):
        self = cls()
        self.a = x
        self.f = y
        return self
    new_scale = classmethod(new_scale)

    def new_translate(cls, x, y):
        self = cls()
        self.c = x
        self.g = y
        return self
    new_translate = classmethod(new_translate)

    def new_rotate(cls, angle):
        # angle in radians.
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.a = self.f = c
        self.b = -s
        self.e = s
        return self
    new_rotate = classmethod(new_rotate)
# a b c d
# e f g h
# i j k l
# m n o p
class Matrix4:
    """4x4 matrix for 3D affine/projective transforms.

    Attributes a..p hold the elements row-major (layout above); [] access
    flattens them column-major (OpenGL order).  Multiplication
    distinguishes Point3 (full affine, translation applied),
    AffineVector3 (homogeneous, w column applied) and Vector3 (linear
    part only).  Mutating methods return self for chaining.
    """
    __slots__ = list('abcdefghijklmnop')

    def __init__(self):
        # Start as the identity transform.
        self.identity()

    def __copy__(self):
        M = Matrix4()
        M.a = self.a
        M.b = self.b
        M.c = self.c
        M.d = self.d
        M.e = self.e
        M.f = self.f
        M.g = self.g
        M.h = self.h
        M.i = self.i
        M.j = self.j
        M.k = self.k
        M.l = self.l
        M.m = self.m
        M.n = self.n
        M.o = self.o
        M.p = self.p
        return M

    copy = __copy__

    def __repr__(self):
        return ('Matrix4([% 8.2f % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f % 8.2f])') \
            % (self.a, self.b, self.c, self.d,
               self.e, self.f, self.g, self.h,
               self.i, self.j, self.k, self.l,
               self.m, self.n, self.o, self.p)

    def __getitem__(self, key):
        # Column-major flattening (OpenGL order).
        return [self.a, self.e, self.i, self.m,
                self.b, self.f, self.j, self.n,
                self.c, self.g, self.k, self.o,
                self.d, self.h, self.l, self.p][key]

    def __setitem__(self, key, value):
        assert not isinstance(key, slice) or \
               key.stop - key.start == len(value), 'key length != value length'
        L = self[:]
        L[key] = value
        (self.a, self.e, self.i, self.m,
         self.b, self.f, self.j, self.n,
         self.c, self.g, self.k, self.o,
         self.d, self.h, self.l, self.p) = L

    def __mul__(self, other):
        if isinstance(other, Matrix4):
            # Cache attributes in local vars (see Matrix3.__mul__).
            Aa = self.a
            Ab = self.b
            Ac = self.c
            Ad = self.d
            Ae = self.e
            Af = self.f
            Ag = self.g
            Ah = self.h
            Ai = self.i
            Aj = self.j
            Ak = self.k
            Al = self.l
            Am = self.m
            An = self.n
            Ao = self.o
            Ap = self.p
            Ba = other.a
            Bb = other.b
            Bc = other.c
            Bd = other.d
            Be = other.e
            Bf = other.f
            Bg = other.g
            Bh = other.h
            Bi = other.i
            Bj = other.j
            Bk = other.k
            Bl = other.l
            Bm = other.m
            Bn = other.n
            Bo = other.o
            Bp = other.p
            C = Matrix4()
            C.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
            C.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
            C.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
            C.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
            C.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
            C.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
            C.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
            C.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
            C.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
            C.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
            C.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
            C.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
            C.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
            C.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
            C.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
            C.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
            return C
        elif isinstance(other, Point3):
            # Point: full affine transform (translation column applied).
            A = self
            B = other
            P = Point3(0, 0, 0)
            P.x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
            P.y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
            P.z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
            return P
        elif isinstance(other, AffineVector3):
            # Homogeneous vector: the w column is applied (B.w is 1).
            # NOTE: check this before Vector3 — AffineVector3 subclasses it.
            A = self
            B = other
            V = AffineVector3(0, 0, 0)
            V.x = A.a * B.x + A.b * B.y + A.c * B.z + A.d * B.w
            V.y = A.e * B.x + A.f * B.y + A.g * B.z + A.h * B.w
            V.z = A.i * B.x + A.j * B.y + A.k * B.z + A.l * B.w
            return V
        elif isinstance(other, Vector3):
            # Plain vector: linear part only, no translation.
            A = self
            B = other
            V = Vector3(0, 0, 0)
            V.x = A.a * B.x + A.b * B.y + A.c * B.z
            V.y = A.e * B.x + A.f * B.y + A.g * B.z
            V.z = A.i * B.x + A.j * B.y + A.k * B.z
            return V
        else:
            # Other geometry: transform a copy via its own hook.
            other = other.copy()
            other._apply_transform(self)
            return other

    def __imul__(self, other):
        assert isinstance(other, Matrix4)
        # Cache attributes in local vars (see Matrix3.__mul__).
        Aa = self.a
        Ab = self.b
        Ac = self.c
        Ad = self.d
        Ae = self.e
        Af = self.f
        Ag = self.g
        Ah = self.h
        Ai = self.i
        Aj = self.j
        Ak = self.k
        Al = self.l
        Am = self.m
        An = self.n
        Ao = self.o
        Ap = self.p
        Ba = other.a
        Bb = other.b
        Bc = other.c
        Bd = other.d
        Be = other.e
        Bf = other.f
        Bg = other.g
        Bh = other.h
        Bi = other.i
        Bj = other.j
        Bk = other.k
        Bl = other.l
        Bm = other.m
        Bn = other.n
        Bo = other.o
        Bp = other.p
        self.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
        self.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
        self.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
        self.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
        self.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
        self.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
        self.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
        self.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
        self.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
        self.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
        self.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
        self.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
        self.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
        self.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
        self.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
        self.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
        return self

    def identity(self):
        """Reset to the identity matrix; returns self."""
        self.a = self.f = self.k = self.p = 1.
        self.b = self.c = self.d = self.e = self.g = self.h = \
            self.i = self.j = self.l = self.m = self.n = self.o = 0
        return self

    def scale(self, x, y, z):
        self *= Matrix4.new_scale(x, y, z)
        return self

    def translate(self, x, y, z):
        self *= Matrix4.new_translate(x, y, z)
        return self

    def rotatex(self, angle):
        # angle in radians, about the X axis.
        self *= Matrix4.new_rotatex(angle)
        return self

    def rotatey(self, angle):
        self *= Matrix4.new_rotatey(angle)
        return self

    def rotatez(self, angle):
        self *= Matrix4.new_rotatez(angle)
        return self

    def rotate_axis(self, angle, axis):
        self *= Matrix4.new_rotate_axis(angle, axis)
        return self

    def rotate_euler(self, heading, attitude, bank):
        self *= Matrix4.new_rotate_euler(heading, attitude, bank)
        return self

    # Static constructors
    def new_identity(cls):
        self = cls()
        return self
    new_identity = classmethod(new_identity)

    def new_scale(cls, x, y, z):
        self = cls()
        self.a = x
        self.f = y
        self.k = z
        return self
    new_scale = classmethod(new_scale)

    def new_translate(cls, x, y, z):
        self = cls()
        self.d = x
        self.h = y
        self.l = z
        return self
    new_translate = classmethod(new_translate)

    def new_rotatex(cls, angle):
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.f = self.k = c
        self.g = -s
        self.j = s
        return self
    new_rotatex = classmethod(new_rotatex)

    def new_rotatey(cls, angle):
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.a = self.k = c
        self.c = s
        self.i = -s
        return self
    new_rotatey = classmethod(new_rotatey)

    def new_rotatez(cls, angle):
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.a = self.f = c
        self.b = -s
        self.e = s
        return self
    new_rotatez = classmethod(new_rotatez)

    def new_rotate_axis(cls, angle, axis):
        """Rotation of *angle* radians about an arbitrary *axis*."""
        assert(isinstance(axis, Vector3))
        vector = axis.normalized()
        x = vector.x
        y = vector.y
        z = vector.z

        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        c1 = 1. - c

        # from the glRotate man page
        self.a = x * x * c1 + c
        self.b = x * y * c1 - z * s
        self.c = x * z * c1 + y * s
        self.e = y * x * c1 + z * s
        self.f = y * y * c1 + c
        self.g = y * z * c1 - x * s
        self.i = x * z * c1 - y * s
        self.j = y * z * c1 + x * s
        self.k = z * z * c1 + c
        return self
    new_rotate_axis = classmethod(new_rotate_axis)

    def new_rotate_euler(cls, heading, attitude, bank):
        """Rotation from heading/attitude/bank Euler angles (radians)."""
        # from http://www.euclideanspace.com/
        ch = math.cos(heading)
        sh = math.sin(heading)
        ca = math.cos(attitude)
        sa = math.sin(attitude)
        cb = math.cos(bank)
        sb = math.sin(bank)

        self = cls()
        self.a = ch * ca
        self.b = sh * sb - ch * sa * cb
        self.c = ch * sa * sb + sh * cb
        self.e = sa
        self.f = ca * cb
        self.g = -ca * sb
        self.i = -sh * ca
        self.j = sh * sa * cb + ch * sb
        self.k = -sh * sa * sb + ch * cb
        return self
    new_rotate_euler = classmethod(new_rotate_euler)

    def new_perspective(cls, fov_y, aspect, near, far):
        """Perspective projection matrix (angles in radians)."""
        # from the gluPerspective man page
        f = 1 / math.tan(fov_y / 2)
        self = cls()
        assert near != 0.0 and near != far
        self.a = f / aspect
        self.f = f
        self.k = (far + near) / (near - far)
        self.l = 2 * far * near / (near - far)
        self.o = -1
        self.p = 0
        return self
    new_perspective = classmethod(new_perspective)
class Quaternion:
    # All methods and naming conventions based off
    # http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions
    # w is the real part, (x, y, z) are the imaginary parts
    """Rotation quaternion: w is the real part, (x, y, z) the imaginary.

    Fixes in this revision:
      * ``__copy__`` built the copy but fell off the end of the function,
        so callers always received None; it now returns the copy.
      * ``normalized`` divided the fields of a fresh *identity*
        quaternion instead of this one's (always yielding
        (1/d, 0, 0, 0)); it now scales self's components.
    """
    __slots__ = ['w', 'x', 'y', 'z']

    def __init__(self):
        self.identity()

    def __copy__(self):
        Q = Quaternion()
        Q.w = self.w
        Q.x = self.x
        Q.y = self.y
        Q.z = self.z
        return Q  # BUGFIX: previously missing, so copy() returned None

    copy = __copy__

    def __repr__(self):
        return 'Quaternion(real=%.2f, imag=<%.2f, %.2f, %.2f>)' % \
            (self.w, self.x, self.y, self.z)

    def __mul__(self, other):
        if isinstance(other, Quaternion):
            # Hamilton product.
            Ax = self.x
            Ay = self.y
            Az = self.z
            Aw = self.w
            Bx = other.x
            By = other.y
            Bz = other.z
            Bw = other.w
            Q = Quaternion()
            Q.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
            Q.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
            Q.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
            Q.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
            return Q
        elif isinstance(other, Vector3):
            # Rotate the vector: q * v * q.conjugated(), expanded inline.
            w = self.w
            x = self.x
            y = self.y
            z = self.z
            Vx = other.x
            Vy = other.y
            Vz = other.z
            return other.__class__(\
                w * w * Vx + 2 * y * w * Vz - 2 * z * w * Vy + \
                x * x * Vx + 2 * y * x * Vy + 2 * z * x * Vz - \
                z * z * Vx - y * y * Vx,
                2 * x * y * Vx + y * y * Vy + 2 * z * y * Vz + \
                2 * w * z * Vx - z * z * Vy + w * w * Vy - \
                2 * x * w * Vz - x * x * Vy,
                2 * x * z * Vx + 2 * y * z * Vy + \
                z * z * Vz - 2 * w * y * Vx - y * y * Vz + \
                2 * w * x * Vy - x * x * Vz + w * w * Vz)
        else:
            # Other geometry: transform a copy via its own hook.
            other = other.copy()
            other._apply_transform(self)
            return other

    def __imul__(self, other):
        assert isinstance(other, Quaternion)
        Ax = self.x
        Ay = self.y
        Az = self.z
        Aw = self.w
        Bx = other.x
        By = other.y
        Bz = other.z
        Bw = other.w
        self.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
        self.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
        self.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
        self.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
        return self

    def __abs__(self):
        return math.sqrt(self.w ** 2 + \
                         self.x ** 2 + \
                         self.y ** 2 + \
                         self.z ** 2)

    magnitude = __abs__

    def magnitude_squared(self):
        return self.w ** 2 + \
               self.x ** 2 + \
               self.y ** 2 + \
               self.z ** 2

    def identity(self):
        """Reset to the identity rotation; returns self."""
        self.w = 1
        self.x = 0
        self.y = 0
        self.z = 0
        return self

    def rotate_axis(self, angle, axis):
        self *= Quaternion.new_rotate_axis(angle, axis)
        return self

    def rotate_euler(self, heading, attitude, bank):
        self *= Quaternion.new_rotate_euler(heading, attitude, bank)
        return self

    def conjugated(self):
        """Return the conjugate (imaginary parts negated)."""
        Q = Quaternion()
        Q.w = self.w
        Q.x = -self.x
        Q.y = -self.y
        Q.z = -self.z
        return Q

    def normalize(self):
        """Normalize in place; returns self (no-op for zero magnitude)."""
        d = self.magnitude()
        if d != 0:
            self.w /= d
            self.x /= d
            self.y /= d
            self.z /= d
        return self

    def normalized(self):
        """Return a unit copy (plain copy for the zero quaternion)."""
        d = self.magnitude()
        if d != 0:
            Q = Quaternion()
            # BUGFIX: scale *self*'s components; the old code divided the
            # fresh identity quaternion's fields instead.
            Q.w = self.w / d
            Q.x = self.x / d
            Q.y = self.y / d
            Q.z = self.z / d
            return Q
        else:
            return self.copy()

    def get_angle_axis(self):
        """Return (angle, axis) of the rotation; axis is a Vector3."""
        if self.w > 1:
            self = self.normalized()
        angle = 2 * math.acos(self.w)
        s = math.sqrt(1 - self.w ** 2)
        if s < 0.001:
            # Angle ~0: axis is arbitrary; pick X.
            return angle, Vector3(1, 0, 0)
        else:
            return angle, Vector3(self.x / s, self.y / s, self.z / s)

    def get_euler(self):
        """Return (heading, attitude, bank), handling the poles."""
        t = self.x * self.y + self.z * self.w
        if t > 0.4999:
            # North-pole singularity.
            heading = 2 * math.atan2(self.x, self.w)
            attitude = math.pi / 2
            bank = 0
        elif t < -0.4999:
            # South-pole singularity.
            heading = -2 * math.atan2(self.x, self.w)
            attitude = -math.pi / 2
            bank = 0
        else:
            sqx = self.x ** 2
            sqy = self.y ** 2
            sqz = self.z ** 2
            heading = math.atan2(2 * self.y * self.w - 2 * self.x * self.z,
                                 1 - 2 * sqy - 2 * sqz)
            attitude = math.asin(2 * t)
            bank = math.atan2(2 * self.x * self.w - 2 * self.y * self.z,
                              1 - 2 * sqx - 2 * sqz)
        return heading, attitude, bank

    def get_matrix(self):
        """Return the equivalent rotation as a Matrix4."""
        xx = self.x ** 2
        xy = self.x * self.y
        xz = self.x * self.z
        xw = self.x * self.w
        yy = self.y ** 2
        yz = self.y * self.z
        yw = self.y * self.w
        zz = self.z ** 2
        zw = self.z * self.w
        M = Matrix4()
        M.a = 1 - 2 * (yy + zz)
        M.b = 2 * (xy - zw)
        M.c = 2 * (xz + yw)
        M.e = 2 * (xy + zw)
        M.f = 1 - 2 * (xx + zz)
        M.g = 2 * (yz - xw)
        M.i = 2 * (xz - yw)
        M.j = 2 * (yz + xw)
        M.k = 1 - 2 * (xx + yy)
        return M

    # Static constructors
    def new_identity(cls):
        return cls()
    new_identity = classmethod(new_identity)

    def new_rotate_axis(cls, angle, axis):
        """Quaternion for *angle* radians about unit-normalized *axis*."""
        assert(isinstance(axis, Vector3))
        axis = axis.normalized()
        s = math.sin(angle / 2)
        Q = cls()
        Q.w = math.cos(angle / 2)
        Q.x = axis.x * s
        Q.y = axis.y * s
        Q.z = axis.z * s
        return Q
    new_rotate_axis = classmethod(new_rotate_axis)

    def new_rotate_euler(cls, heading, attitude, bank):
        """Quaternion from heading/attitude/bank Euler angles (radians)."""
        Q = cls()
        c1 = math.cos(heading / 2)
        s1 = math.sin(heading / 2)
        c2 = math.cos(attitude / 2)
        s2 = math.sin(attitude / 2)
        c3 = math.cos(bank / 2)
        s3 = math.sin(bank / 2)
        Q.w = c1 * c2 * c3 - s1 * s2 * s3
        Q.x = s1 * s2 * c3 + c1 * c2 * s3
        Q.y = s1 * c2 * c3 + c1 * s2 * s3
        Q.z = c1 * s2 * c3 - s1 * c2 * s3
        return Q
    new_rotate_euler = classmethod(new_rotate_euler)

    def new_interpolate(cls, q1, q2, t):
        """Spherical linear interpolation (slerp) between q1 and q2."""
        assert isinstance(q1, Quaternion) and isinstance(q2, Quaternion)
        Q = cls()
        costheta = q1.w * q2.w + q1.x * q2.x + q1.y * q2.y + q1.z * q2.z
        theta = math.acos(costheta)
        if abs(theta) < 0.01:
            # Nearly identical orientations: take q2 as-is.
            Q.w = q2.w
            Q.x = q2.x
            Q.y = q2.y
            Q.z = q2.z
            return Q
        sintheta = math.sqrt(1.0 - costheta * costheta)
        if abs(sintheta) < 0.01:
            # Nearly opposite: fall back to the midpoint.
            Q.w = (q1.w + q2.w) * 0.5
            Q.x = (q1.x + q2.x) * 0.5
            Q.y = (q1.y + q2.y) * 0.5
            Q.z = (q1.z + q2.z) * 0.5
            return Q
        ratio1 = math.sin((1 - t) * theta) / sintheta
        ratio2 = math.sin(t * theta) / sintheta
        Q.w = q1.w * ratio1 + q2.w * ratio2
        Q.x = q1.x * ratio1 + q2.x * ratio2
        Q.y = q1.y * ratio1 + q2.y * ratio2
        Q.z = q1.z * ratio1 + q2.z * ratio2
        return Q
    new_interpolate = classmethod(new_interpolate)
# Geometry
# Much maths thanks to Paul Bourke, http://astronomy.swin.edu.au/~pbourke
# ---------------------------------------------------------------------------
class Geometry:
    """Abstract base for the geometric primitives below.

    Concrete shapes override intersect()/connect(); the per-type
    _intersect_*/_connect_* hooks default to an 'unimplemented' handler
    so unsupported pairings fail with a descriptive AttributeError.

    Fix in this revision: the two handlers used the Python-2-only
    ``raise E, value`` statement form (a SyntaxError on Python 3); the
    parenthesized call form used here behaves identically on Python 2.
    """

    def _connect_unimplemented(self, other):
        raise AttributeError('Cannot connect %s to %s' %
                             (self.__class__, other.__class__))

    def _intersect_unimplemented(self, other):
        raise AttributeError('Cannot intersect %s and %s' %
                             (self.__class__, other.__class__))

    # 2D dispatch hooks (selectively overridden by concrete shapes).
    _intersect_point2 = _intersect_unimplemented
    _intersect_line2 = _intersect_unimplemented
    _intersect_circle = _intersect_unimplemented
    _connect_point2 = _connect_unimplemented
    _connect_line2 = _connect_unimplemented
    _connect_circle = _connect_unimplemented

    # 3D dispatch hooks.
    _intersect_point3 = _intersect_unimplemented
    _intersect_line3 = _intersect_unimplemented
    _intersect_sphere = _intersect_unimplemented
    _intersect_plane = _intersect_unimplemented
    _connect_point3 = _connect_unimplemented
    _connect_line3 = _connect_unimplemented
    _connect_sphere = _connect_unimplemented
    _connect_plane = _connect_unimplemented

    def intersect(self, other):
        raise NotImplementedError

    def connect(self, other):
        raise NotImplementedError

    def distance(self, other):
        """Length of the shortest connecting segment, or 0.0 if none."""
        c = self.connect(other)
        if c:
            return c.length
        return 0.0
def _intersect_point2_circle(P, C):
return abs(P - C.c) <= C.r
def _intersect_line2_line2(A, B):
    """Intersection point of A and B, or None if parallel / out of range.

    Range checks (`_u_in`) make this correct for lines, rays and
    segments alike.
    """
    denom = B.v.y * A.v.x - B.v.x * A.v.y
    if denom == 0:
        return None  # parallel directions
    dy = A.p.y - B.p.y
    dx = A.p.x - B.p.x
    ua = (B.v.x * dy - B.v.y * dx) / denom
    if not A._u_in(ua):
        return None
    ub = (A.v.x * dy - A.v.y * dx) / denom
    if not B._u_in(ub):
        return None
    return Point2(A.p.x + ua * A.v.x,
                  A.p.y + ua * A.v.y)
def _intersect_line2_circle(L, C):
    """Chord of circle C cut by line/ray/segment L.

    Returns a LineSegment2 between the two intersection parameters
    (clamped into L's valid range), or None when L misses C entirely.
    """
    # Solve |L.p + u*L.v - C.c|^2 = r^2 as a quadratic a*u^2 + b*u + c = 0.
    a = L.v.magnitude_squared()
    b = 2 * (L.v.x * (L.p.x - C.c.x) +
             L.v.y * (L.p.y - C.c.y))
    c = (C.c.magnitude_squared() +
         L.p.magnitude_squared() -
         2 * C.c.dot(L.p) -
         C.r ** 2)
    disc = b ** 2 - 4 * a * c
    if disc < 0:
        return None
    root = math.sqrt(disc)
    u1 = (-b + root) / (2 * a)
    u2 = (-b - root) / (2 * a)
    if not L._u_in(u1):
        u1 = max(min(u1, 1.0), 0.0)
    if not L._u_in(u2):
        u2 = max(min(u2, 1.0), 0.0)
    return LineSegment2(Point2(L.p.x + u1 * L.v.x, L.p.y + u1 * L.v.y),
                        Point2(L.p.x + u2 * L.v.x, L.p.y + u2 * L.v.y))
def _connect_point2_line2(P, L):
    """Shortest segment from point P to line/ray/segment L."""
    denom = L.v.magnitude_squared()
    assert denom != 0
    # Parameter of the orthogonal projection of P onto L.
    u = ((P.x - L.p.x) * L.v.x +
         (P.y - L.p.y) * L.v.y) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)  # clamp toward the nearest endpoint
    return LineSegment2(P, Point2(L.p.x + u * L.v.x,
                                  L.p.y + u * L.v.y))
def _connect_point2_circle(P, C):
    """Segment from point P to the nearest point on circle C's boundary."""
    radial = P - C.c
    radial.normalize()
    radial *= C.r
    return LineSegment2(P, Point2(C.c.x + radial.x, C.c.y + radial.y))
def _connect_line2_line2(A, B):
    """Shortest segment connecting lines/rays/segments A and B."""
    denom = B.v.y * A.v.x - B.v.x * A.v.y
    if denom == 0:
        # Parallel, connect an endpoint with a line
        if isinstance(B, Ray2) or isinstance(B, LineSegment2):
            # BUG FIX: the old code did `p1, p2 = _connect_point2_line2(...)`
            # and returned a tuple, but that helper returns a non-iterable
            # LineSegment2 (TypeError at runtime).  Mirror the 3D version
            # (_connect_line3_line3) and swap the segment so it runs A -> B.
            return _connect_point2_line2(B.p, A)._swap()
        # No endpoint (or endpoint is on A), possibly choose arbitrary point
        # on line.
        return _connect_point2_line2(A.p, B)
    dy = A.p.y - B.p.y
    dx = A.p.x - B.p.x
    # Closest-point parameters on A and B, clamped into range.
    ua = (B.v.x * dy - B.v.y * dx) / denom
    if not A._u_in(ua):
        ua = max(min(ua, 1.0), 0.0)
    ub = (A.v.x * dy - A.v.y * dx) / denom
    if not B._u_in(ub):
        ub = max(min(ub, 1.0), 0.0)
    return LineSegment2(Point2(A.p.x + ua * A.v.x, A.p.y + ua * A.v.y),
                        Point2(B.p.x + ub * B.v.x, B.p.y + ub * B.v.y))
def _connect_circle_line2(C, L):
    """Shortest segment from circle C's boundary to line/ray/segment L."""
    denom = L.v.magnitude_squared()
    assert denom != 0
    # Project the circle's center onto L, clamped into L's range.
    u = ((C.c.x - L.p.x) * L.v.x + (C.c.y - L.p.y) * L.v.y) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)
    nearest = Point2(L.p.x + u * L.v.x, L.p.y + u * L.v.y)
    radial = nearest - C.c
    radial.normalize()
    radial *= C.r
    return LineSegment2(Point2(C.c.x + radial.x, C.c.y + radial.y), nearest)
def _connect_circle_circle(A, B):
    """Segment between the facing boundary points of circles A and B."""
    direction = B.c - A.c
    direction.normalize()
    return LineSegment2(
        Point2(A.c.x + direction.x * A.r, A.c.y + direction.y * A.r),
        Point2(B.c.x - direction.x * B.r, B.c.y - direction.y * B.r))
class Point2(Vector2, Geometry):
    """A 2D point taking part in the intersect/connect double dispatch."""

    def __repr__(self):
        return 'Point2(%.2f, %.2f)' % (self.x, self.y)

    def intersect(self, other):
        return other._intersect_point2(self)

    def _intersect_circle(self, other):
        return _intersect_point2_circle(self, other)

    def connect(self, other):
        return other._connect_point2(self)

    def _connect_point2(self, other):
        return LineSegment2(other, self)

    def _connect_line2(self, other):
        # Helpers return point-first segments; swap so self comes first.
        seg = _connect_point2_line2(self, other)
        if seg:
            return seg._swap()

    def _connect_circle(self, other):
        seg = _connect_point2_circle(self, other)
        if seg:
            return seg._swap()
class Line2(Geometry):
    """An infinite 2D line through point `p` with direction `v`.

    Constructors:
      Line2(Point2, Vector2, float)  -- point, direction and length
      Line2(Point2, Point2)          -- through two points
      Line2(Point2, Vector2)         -- point and direction
      Line2(Line2)                   -- copy
    """
    __slots__ = ['p', 'v']

    def __init__(self, *args):
        if len(args) == 3:
            assert isinstance(args[0], Point2) and \
                   isinstance(args[1], Vector2) and \
                   type(args[2]) == float
            self.p = args[0].copy()
            # Scale the direction vector to the requested length.
            self.v = args[1] * args[2] / abs(args[1])
        elif len(args) == 2:
            if isinstance(args[0], Point2) and isinstance(args[1], Point2):
                self.p = args[0].copy()
                self.v = args[1] - args[0]
            elif isinstance(args[0], Point2) and isinstance(args[1], Vector2):
                self.p = args[0].copy()
                self.v = args[1].copy()
            else:
                # Py3-compatible raise form (was `raise E, msg`).
                raise AttributeError('%r' % (args,))
        elif len(args) == 1:
            if isinstance(args[0], Line2):
                self.p = args[0].p.copy()
                self.v = args[0].v.copy()
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))

        if not self.v:
            raise AttributeError('Line has zero-length vector')

    def __copy__(self):
        return self.__class__(self.p, self.v)

    copy = __copy__

    def __repr__(self):
        return 'Line2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.v.x, self.v.y)

    # First endpoint, and the point at parameter u == 1.
    p1 = property(lambda self: self.p)
    p2 = property(lambda self: Point2(self.p.x + self.v.x,
                                      self.p.y + self.v.y))

    def _apply_transform(self, t):
        self.p = t * self.p
        self.v = t * self.v

    def _u_in(self, u):
        # An infinite line accepts any parameter value; Ray2 and
        # LineSegment2 override this with their bounds.
        return True

    def intersect(self, other):
        return other._intersect_line2(self)

    def _intersect_line2(self, other):
        return _intersect_line2_line2(self, other)

    def _intersect_circle(self, other):
        return _intersect_line2_circle(self, other)

    def connect(self, other):
        return other._connect_line2(self)

    def _connect_point2(self, other):
        return _connect_point2_line2(other, self)

    def _connect_line2(self, other):
        return _connect_line2_line2(other, self)

    def _connect_circle(self, other):
        return _connect_circle_line2(other, self)
class Ray2(Line2):
    """A 2D half-line: the line parameter u is valid for u >= 0 only."""
    def __repr__(self):
        return 'Ray2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.v.x, self.v.y)
    def _u_in(self, u):
        # Restrict the infinite line's parameter range to the ray.
        return u >= 0.0
class LineSegment2(Line2):
    """A bounded 2D segment: valid parameters are 0 <= u <= 1."""

    def __repr__(self):
        return 'LineSegment2(<%.2f, %.2f> to <%.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.x + self.v.x, self.p.y + self.v.y)

    def _u_in(self, u):
        return 0.0 <= u <= 1.0

    def __abs__(self):
        return abs(self.v)

    def magnitude_squared(self):
        return self.v.magnitude_squared()

    def _swap(self):
        # Used by the connect helpers to switch the endpoint order in place.
        self.p = self.p2
        self.v *= -1
        return self

    length = property(lambda self: abs(self.v))
class Circle(Geometry):
    """A circle with center `c` (a Vector2) and radius `r` (a float)."""
    __slots__ = ['c', 'r']

    def __init__(self, center, radius):
        assert isinstance(center, Vector2) and type(radius) == float
        self.c = center.copy()
        self.r = radius

    def __copy__(self):
        return self.__class__(self.c, self.r)

    copy = __copy__

    def __repr__(self):
        return 'Circle(<%.2f, %.2f>, radius=%.2f)' % \
            (self.c.x, self.c.y, self.r)

    def _apply_transform(self, t):
        self.c = t * self.c

    # Double-dispatch hooks: forward to the module-level helpers with the
    # argument order those helpers expect.
    def intersect(self, other):
        return other._intersect_circle(self)

    def _intersect_point2(self, other):
        return _intersect_point2_circle(other, self)

    def _intersect_line2(self, other):
        return _intersect_line2_circle(other, self)

    def connect(self, other):
        return other._connect_circle(self)

    def _connect_point2(self, other):
        return _connect_point2_circle(other, self)

    def _connect_line2(self, other):
        seg = _connect_circle_line2(self, other)
        if seg:
            return seg._swap()

    def _connect_circle(self, other):
        return _connect_circle_circle(other, self)
# 3D Geometry
# -------------------------------------------------------------------------
def _connect_point3_line3(P, L):
    """Shortest segment from point P to 3D line/ray/segment L."""
    denom = L.v.magnitude_squared()
    assert denom != 0
    # Parameter of the orthogonal projection of P onto L.
    u = ((P.x - L.p.x) * L.v.x +
         (P.y - L.p.y) * L.v.y +
         (P.z - L.p.z) * L.v.z) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)  # clamp toward the nearest endpoint
    return LineSegment3(P, Point3(L.p.x + u * L.v.x,
                                  L.p.y + u * L.v.y,
                                  L.p.z + u * L.v.z))
def _connect_point3_sphere(P, S):
    """Segment from point P to the nearest point on sphere S's surface."""
    radial = P - S.c
    radial.normalize()
    radial *= S.r
    return LineSegment3(P, Point3(S.c.x + radial.x,
                                  S.c.y + radial.y,
                                  S.c.z + radial.z))
def _connect_point3_plane(p, plane):
    """Segment from point p to its orthogonal projection on the plane."""
    n = plane.n.normalized()
    # Signed distance from p to the plane (plane equation is n.p = k).
    # NOTE(review): uses un-normalized plane.n for the dot product; this is
    # consistent only because Plane.__init__ always normalizes n.
    d = p.dot(plane.n) - plane.k
    return LineSegment3(p, Point3(p.x - n.x * d, p.y - n.y * d, p.z - n.z * d))
def _connect_line3_line3(A, B):
    """Shortest segment between 3D lines/rays/segments A and B.

    Closest-approach formulation after Paul Bourke; the d#### names
    follow his P13.P43-style dot-product notation.
    """
    assert A.v and B.v
    p13 = A.p - B.p
    d1343 = p13.dot(B.v)
    d4321 = B.v.dot(A.v)
    d1321 = p13.dot(A.v)
    d4343 = B.v.magnitude_squared()
    denom = A.v.magnitude_squared() * d4343 - d4321 ** 2
    if denom == 0:
        # Parallel, connect an endpoint with a line
        if isinstance(B, Ray3) or isinstance(B, LineSegment3):
            # Swap so the returned segment runs from A to B.
            return _connect_point3_line3(B.p, A)._swap()
        # No endpoint (or endpoint is on A), possibly choose arbitrary
        # point on line.
        return _connect_point3_line3(A.p, B)
    # Parameters of the closest points on A and B, clamped into range
    # for rays/segments.
    ua = (d1343 * d4321 - d1321 * d4343) / denom
    if not A._u_in(ua):
        ua = max(min(ua, 1.0), 0.0)
    ub = (d1343 + d4321 * ua) / d4343
    if not B._u_in(ub):
        ub = max(min(ub, 1.0), 0.0)
    return LineSegment3(Point3(A.p.x + ua * A.v.x,
                               A.p.y + ua * A.v.y,
                               A.p.z + ua * A.v.z),
                        Point3(B.p.x + ub * B.v.x,
                               B.p.y + ub * B.v.y,
                               B.p.z + ub * B.v.z))
def _connect_line3_plane(L, P):
    """Shortest segment from line/ray/segment L to plane P.

    Returns None when L intersects the plane within its valid range
    (nothing to connect).
    """
    denom = P.n.dot(L.v)
    if not denom:
        # Parallel, choose an endpoint
        return _connect_point3_plane(L.p, P)
    u = (P.k - P.n.dot(L.p)) / denom
    if not L._u_in(u):
        # Intersects out of range; connect from the nearest endpoint.
        u = max(min(u, 1.0), 0.0)
        return _connect_point3_plane(Point3(L.p.x + u * L.v.x,
                                            L.p.y + u * L.v.y,
                                            L.p.z + u * L.v.z), P)
    # In-range intersection: no connecting segment.
    return None
def _connect_sphere_line3(S, L):
    """Shortest segment from sphere S's surface to line/ray/segment L."""
    denom = L.v.magnitude_squared()
    assert denom != 0
    # Project the sphere's center onto L, clamped into L's range.
    u = ((S.c.x - L.p.x) * L.v.x +
         (S.c.y - L.p.y) * L.v.y +
         (S.c.z - L.p.z) * L.v.z) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)
    nearest = Point3(L.p.x + u * L.v.x, L.p.y + u * L.v.y, L.p.z + u * L.v.z)
    radial = nearest - S.c
    radial.normalize()
    radial *= S.r
    return LineSegment3(Point3(S.c.x + radial.x,
                               S.c.y + radial.y,
                               S.c.z + radial.z),
                        nearest)
def _connect_sphere_sphere(A, B):
    """Segment between the facing surface points of spheres A and B.

    BUG FIX: the original used `.x` for both z components (copy/paste
    typo) and *added* the radius on B's side, picking the far side of B.
    Now mirrors _connect_circle_circle: A's surface point toward B,
    B's surface point toward A.
    """
    v = B.c - A.c
    v.normalize()
    return LineSegment3(Point3(A.c.x + v.x * A.r,
                               A.c.y + v.y * A.r,
                               A.c.z + v.z * A.r),
                        Point3(B.c.x - v.x * B.r,
                               B.c.y - v.y * B.r,
                               B.c.z - v.z * B.r))
def _connect_sphere_plane(S, P):
    """Segment from sphere S's surface to the projection of S.c on plane P."""
    center_link = _connect_point3_plane(S.c, P)
    if not center_link:
        return None
    on_plane = center_link.p2
    radial = on_plane - S.c
    radial.normalize()
    radial *= S.r
    return LineSegment3(Point3(S.c.x + radial.x,
                               S.c.y + radial.y,
                               S.c.z + radial.z),
                        on_plane)
def _connect_plane_plane(A, B):
    """Connect two planes; returns None when they intersect."""
    if A.n.cross(B.n):
        # Non-parallel normals: the planes intersect, nothing to connect.
        return None
    # Planes are parallel; connect from an arbitrary point on A.
    return _connect_point3_plane(A._get_point(), B)
def _intersect_point3_sphere(P, S):
return abs(P - S.c) <= S.r
def _intersect_line3_sphere(L, S):
    """Chord of sphere S cut by line/ray/segment L.

    Returns a LineSegment3 between the two intersection parameters
    (clamped into L's valid range), or None when L misses S entirely.
    """
    # Solve |L.p + u*L.v - S.c|^2 = r^2 as a quadratic a*u^2 + b*u + c = 0.
    a = L.v.magnitude_squared()
    b = 2 * (L.v.x * (L.p.x - S.c.x) +
             L.v.y * (L.p.y - S.c.y) +
             L.v.z * (L.p.z - S.c.z))
    c = (S.c.magnitude_squared() +
         L.p.magnitude_squared() -
         2 * S.c.dot(L.p) -
         S.r ** 2)
    disc = b ** 2 - 4 * a * c
    if disc < 0:
        return None
    root = math.sqrt(disc)
    u1 = (-b + root) / (2 * a)
    u2 = (-b - root) / (2 * a)
    if not L._u_in(u1):
        u1 = max(min(u1, 1.0), 0.0)
    if not L._u_in(u2):
        u2 = max(min(u2, 1.0), 0.0)
    return LineSegment3(Point3(L.p.x + u1 * L.v.x,
                               L.p.y + u1 * L.v.y,
                               L.p.z + u1 * L.v.z),
                        Point3(L.p.x + u2 * L.v.x,
                               L.p.y + u2 * L.v.y,
                               L.p.z + u2 * L.v.z))
def _intersect_line3_plane(L, P):
    """Point where line/ray/segment L meets plane P, or None."""
    denom = P.n.dot(L.v)
    if not denom:
        # Direction parallel to the plane: no intersection.
        return None
    u = (P.k - P.n.dot(L.p)) / denom
    if not L._u_in(u):
        # Intersection lies outside the ray/segment's valid range.
        return None
    return Point3(L.p.x + u * L.v.x,
                  L.p.y + u * L.v.y,
                  L.p.z + u * L.v.z)
def _intersect_plane_plane(A, B):
    """Line of intersection of planes A and B, or None when parallel."""
    n1_m = A.n.magnitude_squared()
    n2_m = B.n.magnitude_squared()
    n1d2 = A.n.dot(B.n)
    det = n1_m * n2_m - n1d2 ** 2
    if det == 0:
        # Parallel
        return None
    # A point on both planes expressed as c1*n1 + c2*n2; the direction
    # of the intersection line is n1 x n2.
    c1 = (A.k * n2_m - B.k * n1d2) / det
    c2 = (B.k * n1_m - A.k * n1d2) / det
    return Line3(Point3(c1 * A.n.x + c2 * B.n.x,
                        c1 * A.n.y + c2 * B.n.y,
                        c1 * A.n.z + c2 * B.n.z),
                 A.n.cross(B.n))
class Point3(Vector3, Geometry):
    """A 3D point taking part in the intersect/connect double dispatch."""

    def __repr__(self):
        return 'Point3(%.2f, %.2f, %.2f)' % (self.x, self.y, self.z)

    def intersect(self, other):
        return other._intersect_point3(self)

    def _intersect_sphere(self, other):
        return _intersect_point3_sphere(self, other)

    def connect(self, other):
        return other._connect_point3(self)

    def _connect_point3(self, other):
        # Coincident points have no connecting segment.
        if self != other:
            return LineSegment3(other, self)
        return None

    def _connect_line3(self, other):
        # Helpers return point-first segments; swap so self comes first.
        seg = _connect_point3_line3(self, other)
        if seg:
            return seg._swap()

    def _connect_sphere(self, other):
        seg = _connect_point3_sphere(self, other)
        if seg:
            return seg._swap()

    def _connect_plane(self, other):
        seg = _connect_point3_plane(self, other)
        if seg:
            return seg._swap()
class Line3:
    """An infinite 3D line through point `p` with direction `v`.

    Constructors:
      Line3(Point3, Vector3, float)  -- point, direction and length
      Line3(Point3, Point3)          -- through two points
      Line3(Point3, Vector3)         -- point and direction
      Line3(Line3)                   -- copy
    """
    __slots__ = ['p', 'v']

    def __init__(self, *args):
        if len(args) == 3:
            assert isinstance(args[0], Point3) and \
                   isinstance(args[1], Vector3) and \
                   type(args[2]) == float
            self.p = args[0].copy()
            # Scale the direction vector to the requested length.
            self.v = args[1] * args[2] / abs(args[1])
        elif len(args) == 2:
            if isinstance(args[0], Point3) and isinstance(args[1], Point3):
                self.p = args[0].copy()
                self.v = args[1] - args[0]
            elif isinstance(args[0], Point3) and isinstance(args[1], Vector3):
                self.p = args[0].copy()
                self.v = args[1].copy()
            else:
                # Py3-compatible raise form (was `raise E, msg`).
                raise AttributeError('%r' % (args,))
        elif len(args) == 1:
            if isinstance(args[0], Line3):
                self.p = args[0].p.copy()
                self.v = args[0].v.copy()
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
        # XXX This is annoying.
        #if not self.v:
        #    raise AttributeError('Line has zero-length vector')

    def __copy__(self):
        return self.__class__(self.p, self.v)

    copy = __copy__

    def __repr__(self):
        return 'Line3(<%.2f, %.2f, %.2f> + u<%.2f, %.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.z, self.v.x, self.v.y, self.v.z)

    # First endpoint, and the point at parameter u == 1.
    p1 = property(lambda self: self.p)
    p2 = property(lambda self: Point3(self.p.x + self.v.x,
                                      self.p.y + self.v.y,
                                      self.p.z + self.v.z))

    def _apply_transform(self, t):
        self.p = t * self.p
        self.v = t * self.v

    def _u_in(self, u):
        # An infinite line accepts any parameter value; Ray3 and
        # LineSegment3 override this with their bounds.
        return True

    def intersect(self, other):
        return other._intersect_line3(self)

    def _intersect_sphere(self, other):
        return _intersect_line3_sphere(self, other)

    def _intersect_plane(self, other):
        return _intersect_line3_plane(self, other)

    def connect(self, other):
        return other._connect_line3(self)

    def _connect_point3(self, other):
        return _connect_point3_line3(other, self)

    def _connect_line3(self, other):
        return _connect_line3_line3(other, self)

    def _connect_sphere(self, other):
        return _connect_sphere_line3(other, self)

    def _connect_plane(self, other):
        c = _connect_line3_plane(self, other)
        if c:
            return c
class Ray3(Line3):
    """A 3D half-line: the line parameter u is valid for u >= 0 only."""
    def __repr__(self):
        return 'Ray3(<%.2f, %.2f, %.2f> + u<%.2f, %.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.z, self.v.x, self.v.y, self.v.z)
    def _u_in(self, u):
        # Restrict the infinite line's parameter range to the ray.
        return u >= 0.0
class LineSegment3(Line3):
    """A bounded 3D segment: valid parameters are 0 <= u <= 1."""

    def __repr__(self):
        return 'LineSegment3(<%.2f, %.2f, %.2f> to <%.2f, %.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.z,
             self.p.x + self.v.x, self.p.y + self.v.y, self.p.z + self.v.z)

    def _u_in(self, u):
        return 0.0 <= u <= 1.0

    def __abs__(self):
        return abs(self.v)

    def magnitude_squared(self):
        return self.v.magnitude_squared()

    def _swap(self):
        # Used by the connect helpers to switch the endpoint order in place.
        self.p = self.p2
        self.v *= -1
        return self

    length = property(lambda self: abs(self.v))
class Sphere:
    """A sphere with center `c` (a Vector3) and radius `r` (a float)."""
    __slots__ = ['c', 'r']

    def __init__(self, center, radius):
        assert isinstance(center, Vector3) and type(radius) == float
        self.c = center.copy()
        self.r = radius

    def __copy__(self):
        return self.__class__(self.c, self.r)

    copy = __copy__

    def __repr__(self):
        return 'Sphere(<%.2f, %.2f, %.2f>, radius=%.2f)' % \
            (self.c.x, self.c.y, self.c.z, self.r)

    def _apply_transform(self, t):
        self.c = t * self.c

    # Double-dispatch hooks: forward to the module-level helpers with the
    # argument order those helpers expect.
    def intersect(self, other):
        return other._intersect_sphere(self)

    def _intersect_point3(self, other):
        return _intersect_point3_sphere(other, self)

    def _intersect_line3(self, other):
        return _intersect_line3_sphere(other, self)

    def connect(self, other):
        return other._connect_sphere(self)

    def _connect_point3(self, other):
        return _connect_point3_sphere(other, self)

    def _connect_line3(self, other):
        seg = _connect_sphere_line3(self, other)
        if seg:
            return seg._swap()

    def _connect_sphere(self, other):
        return _connect_sphere_sphere(other, self)

    def _connect_plane(self, other):
        seg = _connect_sphere_plane(self, other)
        if seg:
            return seg
class Plane:
    """A plane in normal form: n.p = k (n is the unit normal, k a scalar).

    Constructors:
      Plane(Point3, Point3, Point3)  -- through three points
      Plane(Point3, Vector3)         -- point on plane and normal
      Plane(Vector3, float)          -- normal and constant k
    """
    __slots__ = ['n', 'k']

    def __init__(self, *args):
        if len(args) == 3:
            assert isinstance(args[0], Point3) and \
                   isinstance(args[1], Point3) and \
                   isinstance(args[2], Point3)
            self.n = (args[1] - args[0]).cross(args[2] - args[0])
            self.n.normalize()
            self.k = self.n.dot(args[0])
        elif len(args) == 2:
            if isinstance(args[0], Point3) and isinstance(args[1], Vector3):
                self.n = args[1].normalized()
                self.k = self.n.dot(args[0])
            elif isinstance(args[0], Vector3) and type(args[1]) == float:
                self.n = args[0].normalized()
                self.k = args[1]
            else:
                # Py3-compatible raise form (was `raise E, msg`).
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))

        if not self.n:
            raise AttributeError('Points on plane are colinear')

    def __copy__(self):
        return self.__class__(self.n, self.k)

    copy = __copy__

    def __repr__(self):
        return 'Plane(<%.2f, %.2f, %.2f>.p = %.2f)' % \
            (self.n.x, self.n.y, self.n.z, self.k)

    def _get_point(self):
        # Return an arbitrary point on the plane
        if self.n.z:
            return Point3(0., 0., self.k / self.n.z)
        elif self.n.y:
            return Point3(0., self.k / self.n.y, 0.)
        else:
            return Point3(self.k / self.n.x, 0., 0.)

    def _apply_transform(self, t):
        # Transform a point on the plane, then recompute k from the
        # transformed normal.
        p = t * self._get_point()
        self.n = t * self.n
        self.k = self.n.dot(p)

    def intersect(self, other):
        return other._intersect_plane(self)

    def _intersect_line3(self, other):
        return _intersect_line3_plane(other, self)

    def _intersect_plane(self, other):
        return _intersect_plane_plane(self, other)

    def connect(self, other):
        return other._connect_plane(self)

    def _connect_point3(self, other):
        return _connect_point3_plane(other, self)

    def _connect_line3(self, other):
        return _connect_line3_plane(other, self)

    def _connect_sphere(self, other):
        return _connect_sphere_plane(other, self)

    def _connect_plane(self, other):
        return _connect_plane_plane(other, self)
| bsd-3-clause |
ArvinDevel/incubator-pulsar | pulsar-functions/instance/src/main/python/server.py | 7 | 2299 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
from concurrent import futures
from log import Log
import grpc
import InstanceCommunication_pb2_grpc
class InstanceCommunicationServicer(InstanceCommunication_pb2_grpc.InstanceControlServicer):
    """gRPC servicer exposing control RPCs (status, metrics, health) of a
    running python function instance.

    Every RPC delegates directly to the wrapped `pyinstance` object.
    (The previous docstring was copy-pasted from the gRPC route-guide
    example.)
    """
    def __init__(self, pyinstance):
        # The python instance this servicer reports on.
        self.pyinstance = pyinstance
    def GetFunctionStatus(self, request, context):
        Log.debug("Came in GetFunctionStatus")
        return self.pyinstance.get_function_status()
    def GetAndResetMetrics(self, request, context):
        Log.debug("Came in GetAndResetMetrics")
        return self.pyinstance.get_and_reset_metrics()
    def ResetMetrics(self, request, context):
        Log.debug("Came in ResetMetrics")
        self.pyinstance.reset_metrics()
        # Echo the request back as the (empty) acknowledgement payload.
        return request
    def GetMetrics(self, request, context):
        Log.debug("Came in GetMetrics")
        return self.pyinstance.get_metrics()
    def HealthCheck(self, request, context):
        return self.pyinstance.health_check()
def serve(port, pyinstance):
    """Start a gRPC server exposing `pyinstance` control RPCs on `port`.

    Returns the started server so the caller can stop it later.
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicer = InstanceCommunicationServicer(pyinstance)
    InstanceCommunication_pb2_grpc.add_InstanceControlServicer_to_server(
        servicer, server)
    server.add_insecure_port('[::]:%d' % port)
    Log.info("Serving InstanceCommunication on port %d" % int(port))
    server.start()
    return server
| apache-2.0 |
dsfsdgsbngfggb/odoo | addons/crm/report/crm_lead_report.py | 309 | 5123 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
class crm_lead_report(osv.Model):
    """CRM Lead Analysis.

    Read-only reporting model backed by the SQL view (re)created in
    init(); one row per crm_lead record, with derived delay and revenue
    measures for pivot/graph views.
    """
    _name = "crm.lead.report"
    _auto = False
    _description = "CRM Lead Analysis"
    _rec_name = 'date_deadline'
    _inherit = ["crm.tracking.mixin"]
    _columns = {
        'date_deadline': fields.date('Exp. Closing', readonly=True, help="Expected Closing"),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'opening_date': fields.datetime('Assignation Date', readonly=True),
        'date_closed': fields.datetime('Close Date', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'nbr_cases': fields.integer("# of Cases", readonly=True),
        # durations (in days)
        'delay_open': fields.float('Delay to Assign', digits=(16, 2), readonly=True, group_operator="avg", help="Number of Days to open the case"),
        'delay_close': fields.float('Delay to Close', digits=(16, 2), readonly=True, group_operator="avg", help="Number of Days to close the case"),
        'delay_expected': fields.float('Overpassed Deadline', digits=(16, 2), readonly=True, group_operator="avg"),
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', readonly=True),
        'country_id': fields.many2one('res.country', 'Country', readonly=True),
        # BUG FIX: 'company_id' was declared twice with identical
        # definitions (the duplicate dict key was silently overriding
        # the first); only one declaration is kept.
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'probability': fields.float('Probability', digits=(16, 2), readonly=True, group_operator="avg"),
        'planned_revenue': fields.float('Total Revenue', digits=(16, 2), readonly=True),  # TDE FIXME master: rename into total_revenue
        'probable_revenue': fields.float('Expected Revenue', digits=(16, 2), readonly=True),  # TDE FIXME master: rename into expected_revenue
        'stage_id': fields.many2one('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids', '=', section_id)]"),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
        'type': fields.selection([
            ('lead', 'Lead'),
            ('opportunity', 'Opportunity'),
        ], 'Type', help="Type is used to separate Leads and Opportunities"),
    }

    def init(self, cr):
        """
        (Re)create the crm_lead_report SQL view.
        @param cr: the current row, from the database cursor
        """
        tools.drop_view_if_exists(cr, 'crm_lead_report')
        cr.execute("""
            CREATE OR REPLACE VIEW crm_lead_report AS (
                SELECT
                    id,
                    c.date_deadline,
                    count(id) as nbr_cases,
                    c.date_open as opening_date,
                    c.date_closed as date_closed,
                    c.date_last_stage_update as date_last_stage_update,
                    c.user_id,
                    c.probability,
                    c.stage_id,
                    c.type,
                    c.company_id,
                    c.priority,
                    c.section_id,
                    c.campaign_id,
                    c.source_id,
                    c.medium_id,
                    c.partner_id,
                    c.country_id,
                    c.planned_revenue as planned_revenue,
                    c.planned_revenue*(c.probability/100) as probable_revenue,
                    c.create_date as create_date,
                    extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
                    abs(extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24)) as delay_expected,
                    extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
                FROM
                    crm_lead c
                WHERE c.active = 'true'
                GROUP BY c.id
            )""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nox/servo | tests/wpt/web-platform-tests/conformance-checkers/tools/picture.py | 238 | 29212 | # -*- coding: utf-8 -*-
import os
# Root of the conformance-checkers directory (two levels above this file).
ccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Boilerplate prepended to every generated test document.
template = """<!DOCTYPE html>
<meta charset=utf-8>
"""
# One entry per invalid-markup test case.  The key becomes the output
# file name ("<key>-novalid.html") and the value is the <picture>/<img>
# markup that a conformance checker is expected to reject.
errors = {
    # missing src on img
    "img-no-src": "<img alt>",
    "img-no-src-with-srcset": "<img srcset=x alt>",
    "img-no-src-with-picture": "<picture><img alt></picture>",
    "img-no-src-with-srcset-and-picture": "<picture><img srcset=x alt></picture>",
    "img-no-src-with-source": "<picture><source srcset=x><img alt></picture>",
    # junk content in picture
    "junk-text-before-img": "<picture>x<img src=x alt></picture>",
    "junk-text-after-img": "<picture><img src=x alt>x</picture>",
    "junk-text-before-source": "<picture>x<source srcset=x><img src=x alt></picture>",
    "junk-text-after-source": "<picture><source srcset=x>x<img src=x alt></picture>",
    "junk-br-before-img": "<picture><br><img src=x alt></picture>",
    "junk-br-after-img": "<picture><img src=x alt><br></picture>",
    "junk-br-before-source": "<picture><br><source srcset=x><img src=x alt></picture>",
    "junk-br-after-source": "<picture><source srcset=x><br><img src=x alt></picture>",
    "junk-video-before": "<picture><video></video><source srcset=x><img src=x alt></picture>",
    "junk-video-no-img": "<picture><video></video></picture>",
    "junk-p-before": "<picture><p></p><source srcset=x><img src=x alt></picture>",
    "junk-p-after": "<picture><source srcset=x><img src=x alt><p></p></picture>",
    "junk-p-wrapping": "<picture><p><source srcset=x><img src=x alt></p></picture>",
    "junk-span-before": "<picture><span></span><source srcset=x><img src=x alt></picture>",
    "junk-span-after": "<picture><source srcset=x><img src=x alt><span></span></picture>",
    "junk-span-wrapping": "<picture><span><source srcset=x><img src=x alt></span></picture>",
    "junk-picture-before": "<picture><picture><img src=x alt></picture><img src=x alt></picture>",
    "junk-picture-wrapping": "<picture><picture><img src=x alt></picture></picture>",
    "junk-figure-wrapping": "<picture><figure><img src=x alt></figure></picture>",
    "junk-input-type-hidden": "<picture><input type=hidden name=x value=x><img src=x alt></picture>",
    # NOTE(review): "scroped" (test name and attribute) looks like a typo for
    # "scoped"; preserved because the generated file name depends on it.
    "junk-style-scroped": "<picture><style scroped></style><img src=x alt></picture>",
    "junk-noscript": "<picture><img src=x alt><noscript></noscript></picture>",
    "junk-noscript-after-source-no-img": "<picture><source srcset=x><noscript><img src=x alt></noscript></picture>",
    "junk-svg": "<picture><img src=x alt><svg></svg></picture>",
    "junk-svg-no-img": "<picture><svg></svg></picture>",
    "junk-math-nog-img": "<picture><math></math></picture>",
    # parents
    "parent-ul": "<ul><picture><img src=x alt></picture></ul>",
    "parent-dl": "<dl><picture><img src=x alt></picture></dl>",
    "parent-hgroup": "<hgroup><h1>x</h1><picture><img src=x alt></picture></hgroup>",
    "parent-noscript-in-head": "<noscript><picture><img src=x alt></picture></noscript>",
    # invalid html syntax
    "html-syntax-source-end-tag": "<picture><source srcset=x></source><img src=x alt></picture>",
    "html-syntax-img-end-tag": "<picture><img src=x alt></img></picture>",
    "html-syntax-picture-no-end-tag": "<picture><img src=x alt>",
    "html-syntax-picture-slash": "<picture/><img src=x alt></picture>",
    "html-syntax-picture-slash-no-end-tag": "<picture/><img src=x alt>",
    # missing img in picture
    "missing-img-empty-picture": "<picture></picture>",
    "missing-img-only-source": "<picture><source srcset=x></picture>",
    "missing-img-only-script": "<picture><script></script></picture>",
    "missing-img-script-and-source": "<picture><script></script><source srcset=x></picture>",
    "missing-img-source-and-script": "<picture><source srcset=x><script></script></picture>",
    # multiple img in picture
    "multiple-img": "<picture><img src=x alt><img src=x alt></picture>",
    "multiple-img-with-script": "<picture><img src=x alt><script></script><img src=x alt></picture>",
    "multiple-img-with-source": "<picture><source srcset=x><img src=x alt><img src=x alt></picture>",
    "multiple-img-with-source-and-script": "<picture><source srcset=x><img src=x alt><script></script><img src=x alt></picture>",
    # source after img
    "source-after-img": "<picture><img src=x alt><source srcset=x></picture>",
    "source-before-and-after-img": "<picture><source srcset=x><img src=x alt><source srcset=x></picture>",
    # source with following sibling source element or img element with a srcset attribute
    "always-matching-source-with-following-img-srcset": "<picture><source srcset=x><img src=x srcset=x alt></picture>",
    "always-matching-source-with-following-source-srcset": "<picture><source srcset=x><source srcset=x><img src=x alt></picture>",
    "always-matching-source-with-following-source-media": "<picture><source srcset=x><source srcset=x media=screen><img src=x alt></picture>",
    "always-matching-source-with-following-source-type": "<picture><source srcset=x><source srcset=x type=image/gif><img src=x alt></picture>",
    "always-matching-source-media-empty-with-following-source-srcset": "<picture><source srcset=x media><source srcset=x><img src=x alt></picture>",
    "always-matching-source-media-spaces-with-following-source-srcset": "<picture><source srcset=x media=' \n\t'><source srcset=x><img src=x alt></picture>",
    "always-matching-source-media-all-with-following-source-srcset": "<picture><source srcset=x media=all><source srcset=x><img src=x alt></picture>",
    "always-matching-source-media-uppercase-with-following-source-srcset": "<picture><source srcset=x media=ALL><source srcset=x><img src=x alt></picture>",
    "always-matching-source-media-all-spaces-with-following-source-srcset": "<picture><source srcset=x media=' all '><source srcset=x><img src=x alt></picture>",
    "always-matching-source-sizes-with-following-source-srcset": "<picture><source srcset='x 100w' sizes=50vw><source srcset=x><img src=x alt></picture>",
    # sizes present
    "img-srcset-no-descriptor-with-sizes": "<img src=x srcset='x' sizes=50vw alt>",
    "img-srcset-w-and-x-width-sizes": "<img src=x srcset='x 100w, y 2x' sizes=50vw alt>",
    "source-srcset-x-with-sizes": "<picture><source srcset='x 1x, y 2x' sizes=50vw><img src=x alt></picture>",
    "source-srcset-h-with-sizes": "<picture><source srcset='x 100h, y 200h' sizes=50vw><img src=x alt></picture>",
    "source-srcset-w-and-x-with-sizes": "<picture><source srcset='x 100w, y 2x' sizes=50vw><img src=x alt></picture>",
    "img-with-sizes-no-srcset": "<img sizes=50vw src=foo alt>",
    # width descriptor without sizes
    "img-srcset-w-no-sizes": "<img srcset='x 100w, y 200w' src=x alt>",
    "source-srcset-w-no-sizes": "<picture><source srcset='x 100w, y 200w'><img src=x alt></picture>",
    "source-type-srcset-w": "<picture><source srcset='x 100w, y 200w' type=image/gif><img src=x alt></picture>",
    # invalid attributes on source
    "source-src": "<picture><source src=x><img src=x alt></picture>",
    "source-src-srcset": "<picture><source src=x srcset=x><img src=x alt></picture>",
    "source-alt": "<picture><source srcset=x alt><img src=x alt></picture>",
    "source-width": "<picture><source srcset=x width=100><img src=x alt></picture>",
    "source-height": "<picture><source srcset=x height=100><img src=x alt></picture>",
    "source-usemap": "<picture><source srcset=x usemap><img src=x alt></picture>",
    "source-ismap": "<picture><source srcset=x ismap><img src=x alt></picture>",
    "source-crossorigin": "<picture><source srcset=x crossorigin><img src=x alt></picture>",
    # NOTE(review): markup below duplicates "source-crossorigin"; the key
    # suggests it was meant to test name=x on source — confirm upstream.
    "source-name": "<picture><source srcset=x crossorigin><img src=x alt></picture>",
    "source-align": "<picture><source srcset=x align=left><img src=x alt></picture>",
    "source-hspace": "<picture><source srcset=x hspace=1><img src=x alt></picture>",
    "source-vspace": "<picture><source srcset=x vspace=1><img src=x alt></picture>",
    "source-longdesc": "<picture><source srcset=x longdesc=x><img src=x alt></picture>",
    "source-border": "<picture><source srcset=x border=1><img src=x alt></picture>",
    # missing srcset on source
    "source-no-srcset": "<picture><source><img src=x alt></picture>",
    "source-no-srcset-with-sizes": "<picture><source sizes=50vw><img src=x alt></picture>",
    "source-no-srcset-with-media": "<picture><source media=screen><img src=x alt></picture>",
    "source-no-srcset-with-type": "<picture><source type='image/webp'><img src=x alt></picture>",
    # invalid attributes on picture
    "picture-src": "<picture src=x><img src=x alt></picture>",
    "picture-srcset": "<picture srcset=x><img src=x alt></picture>",
    "picture-media": "<picture media=screen><img src=x alt></picture>",
    "picture-sizes": "<picture sizes=50vw><img src=x alt></picture>",
    "picture-alt": "<picture alt><img src=x alt></picture>",
    "picture-width": "<picture width=100><img src=x alt></picture>",
    "picture-height": "<picture height=100><img src=x alt></picture>",
    "picture-usemap": "<picture usemap><img src=x alt></picture>",
    "picture-ismap": "<picture ismap><img src=x alt></picture>",
    "picture-crossorigin": "<picture crossorigin><img src=x alt></picture>",
    "picture-name": "<picture name=x><img src=x alt></picture>",
    "picture-lowsrc": "<picture lowsrc=x><img src=x alt></picture>",
    "picture-align": "<picture align=left><img src=x alt></picture>",
    "picture-hspace": "<picture hspace=1><img src=x alt></picture>",
    "picture-vspace": "<picture vspace=1><img src=x alt></picture>",
    "picture-longdesc": "<picture longdesc=x><img src=x alt></picture>",
    "picture-border": "<picture border=1><img src=x alt></picture>",
    # invalid attributes on source in video
    "video-source-srcset": "<video><source srcset=x></video>",
    "video-source-srcset-src": "<video><source srcset=x src=x></video>",
    "video-source-sizes-srcset": "<video><source sizes=50vw srcset='x 100w'></video>",
    "video-source-media-src": "<video><source media=screen src=x></video>",
    # srcset on other elements
    "link-rel-icon-srcset": "<link rel=icon srcset=x href=x>",
    "input-type-image-srcset": "<input type=image src=x srcset=x alt=x>",
    "object-srcset": "<object data=x srcset=x></object>",
    "video-srcset": "<video src=x srcset=x></video>",
    "audio-srcset": "<audio src=x srcset=x></audio>",
    "track-srcset": "<video src=x><track src=x srcset=x></video>",
    "svg-image-srcset": "<svg><image xlink:href=x srcset=x width=1 height=1 /></svg>",
    # invalid attributes on img
    "img-type": "<img src=x type=image/gif alt>",
    "img-type-with-picture": "<picture><img src=x type=image/gif alt></picture>",
    # sizes microsyntax
    "sizes-microsyntax-media-all": "<img sizes='all 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-media-all-and-min-width": "<img sizes='all and (min-width:500px) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-media-min-width-no-parenthesis": "<img sizes='min-width:500px 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-media-general-enclosed-junk": "<img sizes='(123) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-media-bad-junk": "<img sizes='(}) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-two-defaults": "<img sizes='500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-default-first": "<img sizes='100vw, (min-width:500px) 500px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-trailing-comma": "<img sizes='(min-width:500px) 500px, 100vw,' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-trailing-junk": "<img sizes='(min-width:500px) 500px, 100vw, foo bar' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-junk-in-default": "<img sizes='(min-width:500px) 500px, 100vw foo bar' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-junk-in-source-size": "<img sizes='(min-width:500px) 500px foo bar, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-percent-in-source-size-value": "<img sizes='(min-width:500px) 50%, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-no-unit-in-source-size-value": "<img sizes='(min-width:500px) 50, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-deg-source-size-value": "<img sizes='1deg' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-grad-source-size-value": "<img sizes='1grad' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-rad-source-size-value": "<img sizes='1rad' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-turn-source-size-value": "<img sizes='1turn' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-s-source-size-value": "<img sizes='1s' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-ms-source-size-value": "<img sizes='1ms' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-hz-source-size-value": "<img sizes='1Hz' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-khz-source-size-value": "<img sizes='1kHz' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-dpi-source-size-value": "<img sizes='1dpi' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-dpcm-source-size-value": "<img sizes='1dpcm' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-dppx-source-size-value": "<img sizes='1dppx' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-auto-source-size-value": "<img sizes='auto' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-inherit-source-size-value": "<img sizes='inherit' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-initial-source-size-value": "<img sizes='initial' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-default-source-size-value": "<img sizes='default' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-foo-bar-source-size-value": "<img sizes='foo-bar' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-negative-source-size-value": "<img sizes='-1px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-empty": "<img sizes='' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-comma": "<img sizes=',' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-css-comment-after-plus": "<img sizes='+/**/50vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-css-comment-before-unit": "<img sizes='50/**/vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-scientific-notation-negative": "<img sizes='-1e+0px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-scientific-notation-non-integer-in-exponent": "<img sizes='1e+1.5px' srcset='x 100w, y 200w' src=x alt>",
    # srcset microsyntax
    "srcset-microsyntax-leading-comma": "<img srcset=',x' src=x alt>",
    "srcset-microsyntax-leading-comma-multiple": "<img srcset=',,,x' src=x alt>",
    "srcset-microsyntax-trailing-comma": "<img srcset='x,' src=x alt>",
    "srcset-microsyntax-trailing-comma-multiple": "<img srcset='x,,,' src=x alt>",
    "srcset-microsyntax-broken-url": "<img srcset='http: 1x' src=x alt>",
    "srcset-microsyntax-non-integer-w": "<img srcset='x 1.5w' sizes=100vw src=x alt>",
    "srcset-microsyntax-uppercase-w": "<img srcset='x 1W' sizes=100vw src=x alt>",
    "srcset-microsyntax-plus-w": "<img srcset='x +1w' sizes=100vw src=x alt>",
    "srcset-microsyntax-scientific-notation-w": "<img srcset='x 1e0w' sizes=100vw src=x alt>",
    "srcset-microsyntax-zero-w": "<img srcset='x 0w' sizes=100vw src=x alt>",
    "srcset-microsyntax-negative-zero-w": "<img srcset='x -0w' sizes=100vw src=x alt>",
    "srcset-microsyntax-negative-w": "<img srcset='x -1w' sizes=100vw src=x alt>",
    "srcset-microsyntax-plus-x": "<img srcset='x +1x' src=x alt>",
    "srcset-microsyntax-negative-x": "<img srcset='x -1x' src=x alt>",
    "srcset-microsyntax-zero-x": "<img srcset='x 0x' src=x alt>",
    "srcset-microsyntax-negative-zero-x": "<img srcset='x -0x' src=x alt>",
    "srcset-microsyntax-leading-dot-x": "<img srcset='x .5x' src=x alt>",
    "srcset-microsyntax-nan-x": "<img srcset='x NaNx' src=x alt>",
    "srcset-microsyntax-infinity-x": "<img srcset='x Infinityx' src=x alt>",
    "srcset-microsyntax-x-and-w": "<img srcset='x 1x 1w' sizes=100vw src=x alt>",
    "srcset-microsyntax-x-and-h": "<img srcset='x 1x 1h' sizes=100vw src=x alt>",
    "srcset-microsyntax-w-and-h": "<img srcset='x 1w 1h' sizes=100vw src=x alt>",
    "srcset-microsyntax-h": "<img srcset='x 1h' sizes=100vw src=x alt>",
    "srcset-microsyntax-function": "<img srcset='x foobar(baz quux, lol), y 1x' src=x alt>",
    "srcset-microsyntax-parenthesis-junk": "<img srcset='x ><(((((o)>, y 1x' src=x alt>",
    "srcset-microsyntax-square-bracket-junk": "<img srcset='x [, y 1x' src=x alt>",
    "srcset-microsyntax-curly-bracket-junk": "<img srcset='x {, y 1x' src=x alt>",
    "srcset-microsyntax-pipe-junk": "<img srcset='x ||, y 1x' src=x alt>",
    "srcset-microsyntax-w-and-no-descriptor": "<img srcset='x 1w, y' sizes=100vw src=x alt>",
    "srcset-microsyntax-unique-descriptors-1x-and-omitted": "<img srcset='x 1x, y' src=x alt>",
    "srcset-microsyntax-unique-descriptors-2x": "<img srcset='x 2x, y 2x' src=x alt>",
    "srcset-microsyntax-unique-descriptors-integer-and-decimals-x": "<img srcset='x 1x, y 1.0x' src=x alt>",
    "srcset-microsyntax-unique-descriptors-w": "<img srcset='x 1w, y 1w' sizes=100vw src=x alt>",
    "srcset-microsyntax-empty": "<img srcset='' src=x alt>",
    "srcset-microsyntax-comma": "<img srcset=',' src=x alt>",
    "srcset-microsyntax-css-comment-after-descriptor": "<img srcset='x 2x/**/' src=x alt>",
    # aria
    "picture-aria-role-img": "<picture role=img><img src=x alt></picture>",
    "picture-aria-role-button": "<picture role=button><img src=x alt></picture>",
    "picture-aria-role-region": "<picture role=region><img src=x alt></picture>",
    "picture-aria-role-application": "<picture role=application><img src=x alt></picture>",
    "source-aria-role-img": "<picture><source role=img srcset=x><img src=x alt></picture>",
    "picture-aria-role-presentation": "<picture role=presentation><img src=x alt></picture>",
    "source-aria-role-presentation": "<picture><source role=presentation srcset=x><img src=x alt></picture>",
}
# Valid cases that must be emitted inside <head> (before the <body>
# marker) of the aggregated "picture-isvalid.html" file.
non_errors_in_head = {
    "parent-template-in-head": "<template><picture><img src=x alt></picture></template>",
}
# Valid cases: each entry is appended to the <body> of the aggregated
# "picture-isvalid.html" file, annotated with its key as an HTML comment.
non_errors = {
    # basic
    "basic-img-src": "<img src=x alt>",
    "basic-picture-img-src": "<picture><img src=x alt></picture>",
    "basic-picture-source": "<picture><source srcset=x><img src=x alt></picture>",
    # inter-element whitespace
    "inter-element-whitespace": "<picture> <!--x--> <source srcset=x> <!--x--> <img src=x alt> <!--x--> </picture>",
    # parents
    "parent-p": "<p><picture><img src=x alt></picture></p>",
    "parent-h1": "<h1><picture><img src=x alt=x></picture></h1>",
    "parent-noscript-in-body": "<noscript><picture><img src=x alt></picture></noscript>",
    "parent-object": "<object data=x><picture><img src=x alt></picture></object>",
    "parent-video": "<video src=x><picture><img src=x alt></picture></video>",
    "parent-section": "<section><h2>x</h2><picture><img src=x alt></picture></section>",
    "parent-main": "<main><picture><img src=x alt></picture></main>",
    "parent-canvas": "<canvas><picture><img src=x alt></picture></canvas>",
    "parent-template-in-body": "<template><picture><img src=x alt></picture></template>",
    "parent-ruby": "<ruby><picture><img src=x alt></picture><rt>x</rt></ruby>",
    "parent-rt": "<ruby>x<rt><picture><img src=x alt></picture></rt></ruby>",
    "parent-rp": "<ruby>x<rp><picture><img src=x alt></picture></rp><rt>x</rt><rp>x</rp></ruby>",
    "parent-a": "<a href=x><picture><img src=x alt></picture></a>",
    "parent-button": "<button><picture><img src=x alt></picture></button>",
    "parent-td": "<table><tr><td><picture><img src=x alt></picture></table>",
    # script-supporting elements
    "script-first": "<picture><script></script><source srcset=x><img src=x alt></picture>",
    "template-first": "<picture><template></template><source srcset=x><img src=x alt></picture>",
    "script-between": "<picture><source srcset=x><script></script><img src=x alt></picture>",
    "script-after": "<picture><source srcset=x><img src=x alt><script></script></picture>",
    "script-before-after": "<picture><script></script><source srcset=x><img src=x alt><script></script></picture>",
    "script-before-between-after": "<picture><script></script><source srcset=x><script></script><img src=x alt><script></script></picture>",
    "script-and-template": "<picture><template></template><source srcset=x><script></script><img src=x alt><template></template></picture>",
    # source with following sibling source element or img element with a srcset attribute
    "source-with-media-img-with-srcset": "<picture><source srcset=x media=screen><img src=x srcset=x alt></picture>",
    "source-with-media-uppercase-img-with-srcset": "<picture><source srcset=x media=SCREEN><img src=x srcset=x alt></picture>",
    "source-with-media-spaces-img-with-srcset": "<picture><source srcset=x media=' \n\tscreen \n\t'><img src=x srcset=x alt></picture>",
    "source-with-media-source-with-srcset": "<picture><source srcset=x media=screen><source srcset=x><img src=x alt></picture>",
    "source-with-type-img-with-srcset": "<picture><source srcset=x type=image/gif><img src=x srcset=x alt></picture>",
    "source-with-type-source-with-srcset": "<picture><source srcset=x type=image/gif><source srcset=x><img src=x alt></picture>",
    # sizes present
    "img-with-sizes": "<img srcset='x 100w, y 200w' sizes=50vw src=x alt>",
    "source-with-sizes": "<picture><source srcset='x 100w, y 200w' sizes=50vw><img src=x alt></picture>",
    # embed allows any attributes
    "embed-srcset-empty": "<embed srcset>",
    "embed-srcset-junk": "<embed srcset='foo bar'>",
    "embed-sizes-empty": "<embed sizes>",
    "embed-sizes-junk": "<embed sizes='foo bar'>",
    # img src also in srcset
    "img-src-also-in-srcset-1x": "<img src=x srcset='x 1x, y 2x' alt>",
    "img-src-also-in-srcset-2x": "<img src=x srcset='y 1x, x 2x' alt>",
    "img-src-also-in-srcset-w": "<img src=x srcset='x 100w, y 200w' sizes=100vw alt>",
    # img src not in srcset
    "img-src-not-in-srcset-x": "<img src=x srcset='y 1x, z 2x' alt>",
    "img-src-not-in-srcset-w": "<img src=x srcset='y 100w, z 200w' sizes=100vw alt>",
    # source type
    "source-type": "<picture><source srcset=x type=image/gif><img src=x alt></picture>",
    "source-type-srcset-x": "<picture><source srcset='x 1x, y 2x' type=image/gif><img src=x alt></picture>",
    "source-type-srcset-w-sizes": "<picture><source srcset='x 100w, y 200w' type=image/gif sizes=50vw><img src=x alt></picture>",
    # sizes microsyntax
    "sizes-microsyntax-media-min-width": "<img sizes='(min-width:500px) 500px, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-multiple-source-sizes": "<img sizes='(min-width:1500px) 500px, (min-width:1000px) 33vw, (min-width:500px) 50vw, 100vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-no-default": "<img sizes='(min-width:500px) 500px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-media-not-and": "<img sizes='not (width:500px) and (width:500px) 500px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-only-default": "<img sizes='500px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-calc-in-default": "<img sizes='calc(500px)' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-calc-in-source-size-value": "<img sizes='(min-width:500px) calc(500px)' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-calc-in-media": "<img sizes='(min-width:calc(500px)) 500px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-zero": "<img sizes='0' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-minus-zero": "<img sizes='-0' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-em-in-source-size-value": "<img sizes='1em' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-ex-in-source-size-value": "<img sizes='1ex' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-ch-in-source-size-value": "<img sizes='1ch' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-rem-in-source-size-value": "<img sizes='1rem' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-vw-in-source-size-value": "<img sizes='1vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-vh-in-source-size-value": "<img sizes='1vh' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-vmin-in-source-size-value": "<img sizes='1vmin' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-vmax-in-source-size-value": "<img sizes='1vmax' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-cm-in-source-size-value": "<img sizes='1cm' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-mm-in-source-size-value": "<img sizes='1mm' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-q-in-source-size-value": "<img sizes='1q' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-in-in-source-size-value": "<img sizes='1in' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-pc-in-source-size-value": "<img sizes='1pc' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-pt-in-source-size-value": "<img sizes='1pt' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-px-in-source-size-value": "<img sizes='1px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-non-integer-px-in-source-size-value": "<img sizes='0.2px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-leading-css-comment": "<img sizes='/**/50vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-trailing-css-comment": "<img sizes='50vw/**/' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-plus": "<img sizes='+50vw' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-non-integer-omitted-zero": "<img sizes='.2px' srcset='x 100w, y 200w' src=x alt>",
    # NOTE(review): "scientifi" (missing trailing "c") is preserved in the
    # keys below because they only name the test case, not the markup.
    "sizes-microsyntax-scientifi-notation-0": "<img sizes='-0e-0px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-scientifi-notation-1": "<img sizes='+11.11e+11px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-scientifi-notation-2": "<img sizes='2.2e2px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-scientifi-notation-3": "<img sizes='33E33px' srcset='x 100w, y 200w' src=x alt>",
    "sizes-microsyntax-scientifi-notation-4": "<img sizes='.4E4px' srcset='x 100w, y 200w' src=x alt>",
    # srcset microsyntax
    "srcset-microsyntax-comma-in-url": "<img srcset='x,x' src=x alt>",
    "srcset-microsyntax-percent-escaped-leading-comma-in-url": "<img srcset='%2Cx' src=x alt>",
    "srcset-microsyntax-percent-escaped-trailing-comma-in-url": "<img srcset='x%2C' src=x alt>",
    "srcset-microsyntax-percent-escaped-space-in-url": "<img srcset='%20' src=x alt>",
    "srcset-microsyntax-w": "<img srcset='x 1w' sizes=100vw src=x alt>",
    "srcset-microsyntax-x": "<img srcset='x 1x' src=x alt>",
    "srcset-microsyntax-non-integer-x": "<img srcset='x 1.5x' src=x alt>",
    "srcset-microsyntax-scientific-notation-x": "<img srcset='x 1e0x' src=x alt>",
    "srcset-microsyntax-scientific-notation-decimals-x": "<img srcset='x 1.5e0x' src=x alt>",
    "srcset-microsyntax-scientific-notation-e-plus-x": "<img srcset='x 1e+0x' src=x alt>",
    "srcset-microsyntax-scientific-notation-e-minus-x": "<img srcset='x 1e-0x' src=x alt>",
    "srcset-microsyntax-scientific-notation-e-uppercase-x": "<img srcset='x 1E0x' src=x alt>",
    "srcset-microsyntax-no-space-between-candidates": "<img srcset='x 1x,y 2x' src=x alt>",
    # valid attributes on img in picture
    "img-crossorigin-with-picture": "<picture><img crossorigin src=x alt></picture>",
    "img-usemap-with-picture": "<picture><img usemap=#x src=x alt></picture><map name=x></map>",
    "img-ismap-with-picture": "<a href=x><picture><img ismap src=x alt></picture></a>",
    "img-width-height-with-picture": "<picture><img src=x alt width=1 height=1></picture>",
    "img-width-height-zero-with-picture": "<picture><img src=x alt width=0 height=0></picture>",
    # global attributes on picture
    "picture-global-attributes": "<picture title=x class=x dir=ltr hidden id=asdf tabindex=0><img src=x alt></picture>",
}
# Emit the test files: one "<key>-novalid.html" per invalid case, plus a
# single "picture-isvalid.html" aggregating every valid case.
#
# Fixes over the original:
#  - files are opened in text mode ('w'); the original used 'wb' while
#    writing str objects, which raises TypeError under Python 3;
#  - `with` guarantees handles are closed even if a write fails;
#  - the handle is named `out` instead of shadowing the `file` builtin;
#  - .items() avoids a second dict lookup per key.
for key, markup in errors.items():
    doc = template + '<title>invalid %s</title>\n' % key + markup
    path = os.path.join(ccdir, "html/elements/picture/%s-novalid.html" % key)
    with open(path, 'w') as out:
        out.write(doc)

with open(os.path.join(ccdir, "html/elements/picture/picture-isvalid.html"), 'w') as out:
    out.write(template + '<title>valid picture</title>\n')
    # Head-only cases come first, then everything else after <body>.
    for key, markup in non_errors_in_head.items():
        out.write('%s <!-- %s -->\n' % (markup, key))
    out.write('<body>\n')
    for key, markup in non_errors.items():
        out.write('%s <!-- %s -->\n' % (markup, key))
# vim: ts=4:sw=4
| mpl-2.0 |
davidrenne/django-allauth | allauth/socialaccount/admin.py | 67 | 1830 | import django
from django.contrib import admin
from django import forms
from allauth.account.adapter import get_adapter
from .models import SocialApp, SocialAccount, SocialToken
class SocialAppForm(forms.ModelForm):
    """ModelForm for SocialApp with widened text inputs for the credential fields."""

    class Meta:
        model = SocialApp
        exclude = []
        # All three credential fields share the same 100-character-wide input.
        widgets = {
            field: forms.TextInput(attrs={'size': '100'})
            for field in ('client_id', 'key', 'secret')
        }
class SocialAppAdmin(admin.ModelAdmin):
    """Admin configuration for SocialApp entries."""

    # Custom form widens the credential text inputs.
    form = SocialAppForm
    list_display = ('name', 'provider',)
    # Sites m2m uses the horizontal filter widget.
    filter_horizontal = ('sites',)
class SocialAccountAdmin(admin.ModelAdmin):
    """Admin for SocialAccount rows; search fields come from the account adapter."""

    search_fields = []
    raw_id_fields = ('user',)
    list_display = ('user', 'uid', 'provider')
    list_filter = ('provider',)

    def __init__(self, *args, **kwargs):
        super(SocialAccountAdmin, self).__init__(*args, **kwargs)
        # Django < 1.7 never calls get_search_fields(), so the dynamic
        # fields have to be resolved eagerly on those versions.
        if not self.search_fields and django.VERSION[:2] < (1, 7):
            self.search_fields = self.get_search_fields(None)

    def get_search_fields(self, request):
        """Prefix the adapter's user search fields with the user FK lookup."""
        return ['user__' + field
                for field in get_adapter().get_user_search_fields()]
class SocialTokenAdmin(admin.ModelAdmin):
    """Admin for SocialToken rows with a shortened token column."""

    raw_id_fields = ('app', 'account',)
    list_display = ('app', 'account', 'truncated_token', 'expires_at')
    list_filter = ('app', 'app__provider', 'expires_at')

    def truncated_token(self, token):
        """Return the token value, clipped to 40 characters for the changelist."""
        value = token.token
        if len(value) <= 40:
            return value
        return value[0:40] + '...(truncated)'

    truncated_token.short_description = 'Token'
# Register the social-account models with the default admin site.
admin.site.register(SocialApp, SocialAppAdmin)
admin.site.register(SocialToken, SocialTokenAdmin)
admin.site.register(SocialAccount, SocialAccountAdmin)
| mit |
AsherYang/ThreeLine | server/ffstore/mgrsys/PermissionManager.py | 1 | 2141 | #! /usr/bin/python
# -*- coding:utf-8 -*-
"""
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2018/7/2
Desc: 权限管理类
主要做一些权限校验
"""
from BaseResponse import BaseResponse
from constant import ResponseCode
from constant import LoginStatus
from util.MD5Util import MD5Util, ADMIN_SECRET_KEY
from mgrsys.AdminManager import AdminManager
class PermissionManager:
    """Permission manager: performs admin permission checks."""

    def __init__(self):
        pass

    # NOTE: the bare string below is kept in its original position; it is
    # evaluated and discarded at class-creation time and documents the
    # method that follows.
    """
    Verify administrator permission:
    1. First run an MD5 signature check based on the timestamp.
    2. Then run a login check using the admin's phone number and password.
    3. Finally check the login validity period (steps 2 and 3 are both
       performed inside adminMgr.checkLoginState).
    Only administrators that have logged in are allowed to operate.
    :return: baseResponse, suitable for returning directly from the API
    """
    def checkAdminPermissionWithLoginStatus(self, sign, time, admin_tel, sms_pwd):
        # All four request parameters are mandatory.
        baseResponse = BaseResponse()
        if sign is None or time is None or admin_tel is None or sms_pwd is None:
            baseResponse.code = ResponseCode.fail_api_args
            baseResponse.desc = ResponseCode.fail_api_args_desc
        else:
            md5Util = MD5Util(ADMIN_SECRET_KEY)
            adminMgr = AdminManager()
            # Step 1: the client must have signed the timestamp with the
            # shared admin secret key.
            if sign == md5Util.md5Signature(time):
                # Steps 2-3: credential check plus login-expiry check.
                login_status = adminMgr.checkLoginState(admin_tel, sms_pwd)
                if login_status == LoginStatus.STATUS_LOGIN_SUCCESS:
                    baseResponse.code = ResponseCode.success_check_admin_permission
                    baseResponse.desc = ResponseCode.success_check_admin_permission_desc
                elif login_status == LoginStatus.STATUS_LOGIN_OUT_OF_DATE:
                    baseResponse.code = ResponseCode.fail_admin_out_of_date
                    baseResponse.desc = ResponseCode.fail_admin_out_of_date_desc
                else:
                    baseResponse.code = ResponseCode.fail_admin_login
                    baseResponse.desc = ResponseCode.fail_admin_login_desc
            else:
                baseResponse.code = ResponseCode.illegal_md5_client
                baseResponse.desc = ResponseCode.illegal_md5_client_desc
        return baseResponse
| apache-2.0 |
j00bar/ansible | lib/ansible/module_utils/aos.py | 29 | 5670 | #
# Copyright (c) 2017 Apstra Inc, <community@apstra.com>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Apstra AOS modules
In order to use this module, include it as part of your module
from ansible.module_utils.aos import *
"""
import json
from distutils.version import LooseVersion
from ansible.module_utils.pycompat24 import get_exception
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
def check_aos_version(module, min=False):
    """
    Verify the aos-pyez library is importable and, when *min* is given,
    that the installed version satisfies the requirement.

    Fails the Ansible module via ``fail_json`` when the check does not
    pass; returns True otherwise.
    """
    if not HAS_AOS_PYEZ:
        module.fail_json(msg='aos-pyez is not installed. Please see details '
                             'here: https://github.com/Apstra/aos-pyez')
    elif min:
        import apstra.aosom
        installed = LooseVersion(apstra.aosom.__version__)
        # not (installed >= min)  <=>  installed < min for LooseVersion
        if installed < LooseVersion(min):
            module.fail_json(msg='aos-pyez >= %s is required for this module' % min)
    return True
def get_aos_session(module, auth):
    """
    Resume an existing AOS session and return the session object.

    Args:
        auth (dict): An AOS session as obtained by aos_login module blocks::

            dict( token=<token>,
                  server=<ip>,
                  port=<port>
                )

    Return:
        Aos object
    """
    check_aos_version(module)
    session = Session()
    session.session = auth
    return session
def find_collection_item(collection, item_name=False, item_id=False):
    """
    Locate an item in an aos-pyez collection, by label (*item_name*) or,
    failing that, by uid (*item_id*).

    Return
        collection_item: object corresponding to the collection type;
        when nothing matches, the collection's "empty" item
        (``collection['']``) is returned instead of None.
    """
    found = None
    if item_name:
        found = collection.find(label=item_name)
    elif item_id:
        found = collection.find(uid=item_id)
    if found is not None:
        return found
    # Fall back to the collection's placeholder item when nothing matched.
    return collection['']
def content_to_dict(module, content):
    """
    Convert 'content' (a YAML or JSON string) into a Python dict.

    On success the parsed dict replaces ``module.params['content']`` and is
    returned; any parse failure, or a result that is not a non-empty dict,
    aborts the module via ``fail_json``.
    """
    # yaml is imported at module level inside a try/except; without this
    # guard a missing PyYAML would surface as a confusing NameError below.
    if not HAS_YAML:
        module.fail_json(msg="Python Library Yaml is not present, mandatory to use 'content'")
    content_dict = None
    try:
        content_dict = yaml.safe_load(content)
        # 'content' must parse to a non-empty mapping; anything else
        # (list, scalar, None, empty dict) is rejected.
        if not isinstance(content_dict, dict):
            raise TypeError("'content' is not a dict")
        if not content_dict:
            raise ValueError("'content' is an empty dict")
    except Exception:
        module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")
    # replace the string with the dict
    module.params['content'] = content_dict
    return content_dict
def do_load_resource(module, collection, name):
    """
    Create a new object (collection.item) by loading a datastructure directly.

    Exits the module via ``exit_json``: changed=False when an item with
    *name* already exists, changed=True after writing the new item (the
    write is skipped in check mode).
    """
    try:
        item = find_collection_item(collection, name, '')
    except Exception:
        module.fail_json(msg="An error occurred while running 'find_collection_item'")
    # Idempotency: an existing item is reported unchanged.
    if item.exists:
        module.exit_json(changed=False,
                         name=item.name,
                         id=item.id,
                         value=item.value)
    # If not in check mode, apply the changes
    if not module.check_mode:
        try:
            item.datum = module.params['content']
            item.write()
        except Exception:
            e = get_exception()
            module.fail_json(msg="Unable to write item content : %r" % e)
    module.exit_json(changed=True,
                     name=item.name,
                     id=item.id,
                     value=item.value)
| gpl-3.0 |
fanjunwei/depot_tools | third_party/pylint/__init__.py | 70 | 1458 | # Copyright (c) 2003-2012 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from .__pkginfo__ import version as __version__
def run_pylint():
    """Console entry point: run the pylint checker on ``sys.argv`` arguments."""
    from pylint.lint import Run as _Run
    _Run(sys.argv[1:])
def run_pylint_gui():
    """Console entry point: launch the Tk-based pylint GUI, exiting with a
    message when tkinter is unavailable."""
    try:
        from pylint.gui import Run as _Run
        _Run(sys.argv[1:])
    except ImportError:
        sys.exit('tkinter is not available')
def run_epylint():
    """Console entry point: run pylint in Emacs-friendly (epylint) mode."""
    from pylint.epylint import Run as _Run
    _Run()
def run_pyreverse():
    """Console entry point: run the pyreverse UML diagram generator."""
    from pylint.pyreverse.main import Run as _Run
    _Run(sys.argv[1:])
def run_symilar():
    """Console entry point: run the symilar duplicate-code checker."""
    from pylint.checkers.similar import Run as _Run
    _Run(sys.argv[1:])
| bsd-3-clause |
hornn/interviews | tools/bin/ext/yaml/emitter.py | 126 | 44317 |
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from error import YAMLError
from events import *
import re
class EmitterError(YAMLError):
    """Raised when the incoming event stream violates the emitter's grammar."""
    pass
class ScalarAnalysis(object):
    """Result record of Emitter.analyze_scalar: which output styles
    (plain / single-quoted / double-quoted / block) can represent a
    given scalar value."""
    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        self.scalar = scalar                            # the analyzed text itself
        self.empty = empty                              # True for the empty string
        self.multiline = multiline                      # contains line breaks
        self.allow_flow_plain = allow_flow_plain        # plain style OK in flow context
        self.allow_block_plain = allow_block_plain      # plain style OK in block context
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted  # double-quoted can represent anything
        self.allow_block = allow_block                  # literal/folded block styles OK
class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!' : u'!',
u'tag:yaml.org,2002:' : u'!!',
}
    def __init__(self, stream, canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None):
        """Set up the emitter state machine writing to *stream*.

        canonical/indent/width/allow_unicode/line_break tune the output
        format; out-of-range indent (not in 2..9) and width fall back to
        the defaults 2 and 80.
        """
        # The stream should have the methods `write` and possibly `flush`.
        self.stream = stream
        # Encoding can be overriden by STREAM-START.
        self.encoding = None
        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = []
        self.state = self.expect_stream_start
        # Current event and the event queue.
        self.events = []
        self.event = None
        # The current indentation level and the stack of previous indents.
        self.indents = []
        self.indent = None
        # Flow level.
        self.flow_level = 0
        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False
        # Characteristics of the last emitted character:
        #  - current position.
        #  - is it a whitespace?
        #  - is it an indention character
        #    (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        self.best_indent = 2
        if indent and 1 < indent < 10:
            self.best_indent = indent
        self.best_width = 80
        if width and width > self.best_indent*2:
            self.best_width = width
        self.best_line_break = u'\n'
        if line_break in [u'\r', u'\n', u'\r\n']:
            self.best_line_break = line_break
        # Tag prefixes.
        self.tag_prefixes = None
        # Prepared anchor and tag.
        self.prepared_anchor = None
        self.prepared_tag = None
        # Scalar analysis and style.
        self.analysis = None
        self.style = None
    def emit(self, event):
        """Queue *event* and process as many buffered events as possible
        by repeatedly invoking the current state handler."""
        self.events.append(event)
        while not self.need_more_events():
            self.event = self.events.pop(0)
            self.state()
            self.event = None
# In some cases, we wait for a few next events before emitting.
    def need_more_events(self):
        """Return True when emission should wait for more events.

        Document/sequence/mapping starts need look-ahead (1/2/3 extra
        events) so the emitter can decide e.g. whether the node is empty.
        """
        if not self.events:
            return True
        event = self.events[0]
        if isinstance(event, DocumentStartEvent):
            return self.need_events(1)
        elif isinstance(event, SequenceStartEvent):
            return self.need_events(2)
        elif isinstance(event, MappingStartEvent):
            return self.need_events(3)
        else:
            return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
    def expect_stream_start(self):
        """State: consume STREAM-START, record its encoding, and move on
        to expecting the first document."""
        if isinstance(self.event, StreamStartEvent):
            if self.event.encoding:
                self.encoding = self.event.encoding
            self.write_stream_start()
            self.state = self.expect_first_document_start
        else:
            raise EmitterError("expected StreamStartEvent, but got %s"
                    % self.event)
    def expect_nothing(self):
        """Terminal state after STREAM-END: any further event is an error."""
        raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
    def expect_first_document_start(self):
        """State: first document of the stream (may omit the '---' marker)."""
        return self.expect_document_start(first=True)
    def expect_document_start(self, first=False):
        """State: emit a document header (%YAML / %TAG directives and the
        '---' marker unless the document may start implicitly), or end
        the stream on STREAM-END."""
        if isinstance(self.event, DocumentStartEvent):
            if self.event.version:
                version_text = self.prepare_version(self.event.version)
                self.write_version_directive(version_text)
            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
            if self.event.tags:
                # NOTE(review): keys() returning a sortable list assumes
                # Python 2 dict semantics.
                handles = self.event.tags.keys()
                handles.sort()
                for handle in handles:
                    prefix = self.event.tags[handle]
                    self.tag_prefixes[prefix] = handle
                    handle_text = self.prepare_tag_handle(handle)
                    prefix_text = self.prepare_tag_prefix(prefix)
                    self.write_tag_directive(handle_text, prefix_text)
            # The '---' marker may be dropped only for the very first
            # document, with no directives and a non-empty root node.
            implicit = (first and not self.event.explicit and not self.canonical
                    and not self.event.version and not self.event.tags
                    and not self.check_empty_document())
            if not implicit:
                self.write_indent()
                self.write_indicator(u'---', True)
                if self.canonical:
                    self.write_indent()
            self.state = self.expect_document_root
        elif isinstance(self.event, StreamEndEvent):
            self.write_stream_end()
            self.state = self.expect_nothing
        else:
            raise EmitterError("expected DocumentStartEvent, but got %s"
                    % self.event)
    def expect_document_end(self):
        """State: close a document, writing '...' when the end is explicit,
        then flush and wait for the next document."""
        if isinstance(self.event, DocumentEndEvent):
            self.write_indent()
            if self.event.explicit:
                self.write_indicator(u'...', True)
                self.write_indent()
            self.flush_stream()
            self.state = self.expect_document_start
        else:
            raise EmitterError("expected DocumentEndEvent, but got %s"
                    % self.event)
    def expect_document_root(self):
        """State: emit the document's root node, then expect DOCUMENT-END."""
        self.states.append(self.expect_document_end)
        self.expect_node(root=True)
# Node handlers.
    def expect_node(self, root=False, sequence=False, mapping=False,
            simple_key=False):
        """Dispatch on the current event (alias / scalar / sequence /
        mapping), recording the syntactic context flags first.

        Collections fall back to flow style when already inside a flow
        collection, in canonical mode, when requested, or when empty.
        """
        self.root_context = root
        self.sequence_context = sequence
        self.mapping_context = mapping
        self.simple_key_context = simple_key
        if isinstance(self.event, AliasEvent):
            self.expect_alias()
        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
            self.process_anchor(u'&')
            self.process_tag()
            if isinstance(self.event, ScalarEvent):
                self.expect_scalar()
            elif isinstance(self.event, SequenceStartEvent):
                if self.flow_level or self.canonical or self.event.flow_style \
                        or self.check_empty_sequence():
                    self.expect_flow_sequence()
                else:
                    self.expect_block_sequence()
            elif isinstance(self.event, MappingStartEvent):
                if self.flow_level or self.canonical or self.event.flow_style \
                        or self.check_empty_mapping():
                    self.expect_flow_mapping()
                else:
                    self.expect_block_mapping()
        else:
            raise EmitterError("expected NodeEvent, but got %s" % self.event)
    def expect_alias(self):
        """Emit an alias node ('*anchor') and return to the parent state."""
        if self.event.anchor is None:
            raise EmitterError("anchor is not specified for alias")
        self.process_anchor(u'*')
        self.state = self.states.pop()
    def expect_scalar(self):
        """Emit a scalar node at a temporary flow indent, then return to
        the parent state."""
        self.increase_indent(flow=True)
        self.process_scalar()
        self.indent = self.indents.pop()
        self.state = self.states.pop()
# Flow sequence handlers.
    def expect_flow_sequence(self):
        """Open a flow sequence: write '[', deepen flow level and indent."""
        self.write_indicator(u'[', True, whitespace=True)
        self.flow_level += 1
        self.increase_indent(flow=True)
        self.state = self.expect_first_flow_sequence_item
    def expect_first_flow_sequence_item(self):
        """First item of a flow sequence: close immediately on ']' for an
        empty sequence, otherwise emit the item (no leading comma)."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            self.write_indicator(u']', False)
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
    def expect_flow_sequence_item(self):
        """Subsequent flow-sequence item: either close with ']' (trailing
        comma only in canonical mode) or write ', ' and the next node."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            if self.canonical:
                self.write_indicator(u',', False)
                self.write_indent()
            self.write_indicator(u']', False)
            self.state = self.states.pop()
        else:
            self.write_indicator(u',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
# Flow mapping handlers.
    def expect_flow_mapping(self):
        """Open a flow mapping: write '{', deepen flow level and indent."""
        self.write_indicator(u'{', True, whitespace=True)
        self.flow_level += 1
        self.increase_indent(flow=True)
        self.state = self.expect_first_flow_mapping_key
    def expect_first_flow_mapping_key(self):
        """First key of a flow mapping: close on '}' for an empty mapping;
        otherwise emit the key, using the explicit '?' form when the key is
        too long/complex to be a simple key."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            self.write_indicator(u'}', False)
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_key(self):
        """Subsequent flow-mapping key: close with '}' (trailing comma only
        in canonical mode) or write ',' and the next key (simple or '?')."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            if self.canonical:
                self.write_indicator(u',', False)
                self.write_indent()
            self.write_indicator(u'}', False)
            self.state = self.states.pop()
        else:
            self.write_indicator(u',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_simple_value(self):
        """Value after a simple key: write ':' immediately, then the node."""
        self.write_indicator(u':', False)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
    def expect_flow_mapping_value(self):
        """Value after an explicit '?' key: ':' preceded by whitespace."""
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.write_indicator(u':', True)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
# Block sequence handlers.
    def expect_block_sequence(self):
        """Open a block sequence; a sequence nested directly in a mapping
        value position is written without extra indentation."""
        indentless = (self.mapping_context and not self.indention)
        self.increase_indent(flow=False, indentless=indentless)
        self.state = self.expect_first_block_sequence_item
    def expect_first_block_sequence_item(self):
        """First block-sequence item (SequenceEndEvent not allowed here:
        an empty sequence would have been emitted in flow style)."""
        return self.expect_block_sequence_item(first=True)
    def expect_block_sequence_item(self, first=False):
        """Emit one '- item' line of a block sequence, or pop the indent
        and state when the sequence ends."""
        if not first and isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            self.write_indent()
            self.write_indicator(u'-', True, indention=True)
            self.states.append(self.expect_block_sequence_item)
            self.expect_node(sequence=True)
# Block mapping handlers.
    def expect_block_mapping(self):
        """Open a block mapping at one deeper indentation level."""
        self.increase_indent(flow=False)
        self.state = self.expect_first_block_mapping_key
    def expect_first_block_mapping_key(self):
        """First block-mapping key (empty mappings are emitted in flow style)."""
        return self.expect_block_mapping_key(first=True)
    def expect_block_mapping_key(self, first=False):
        """Emit a block-mapping key (simple 'key:' or explicit '? key'),
        or pop the indent and state when the mapping ends."""
        if not first and isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            self.write_indent()
            if self.check_simple_key():
                self.states.append(self.expect_block_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u'?', True, indention=True)
                self.states.append(self.expect_block_mapping_value)
                self.expect_node(mapping=True)
    def expect_block_mapping_simple_value(self):
        """Value after a simple block key: ':' on the same line."""
        self.write_indicator(u':', False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
    def expect_block_mapping_value(self):
        """Value after an explicit '?' block key: ':' on its own line."""
        self.write_indent()
        self.write_indicator(u':', True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
# Checkers.
    def check_empty_sequence(self):
        """True when the current event opens a sequence that the look-ahead
        shows to be immediately closed (empty)."""
        return (isinstance(self.event, SequenceStartEvent) and self.events
                and isinstance(self.events[0], SequenceEndEvent))
    def check_empty_mapping(self):
        """True when the current event opens a mapping that the look-ahead
        shows to be immediately closed (empty)."""
        return (isinstance(self.event, MappingStartEvent) and self.events
                and isinstance(self.events[0], MappingEndEvent))
    def check_empty_document(self):
        """True when the upcoming document consists of a single bare,
        implicit, empty scalar (such a document may omit '---')."""
        if not isinstance(self.event, DocumentStartEvent) or not self.events:
            return False
        event = self.events[0]
        return (isinstance(event, ScalarEvent) and event.anchor is None
                and event.tag is None and event.implicit and event.value == u'')
    def check_simple_key(self):
        """Decide whether the current node may be written as a simple
        mapping key ('key: value' without '?').

        Computes (and caches) the prepared anchor/tag and scalar analysis
        as side effects; the total rendered length must stay under 128 and
        the node must be an alias, a short single-line scalar, or an empty
        collection.
        """
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if isinstance(self.event, (ScalarEvent, CollectionStartEvent))  \
                and self.event.tag is not None:
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        return (length < 128 and (isinstance(self.event, AliasEvent)
            or (isinstance(self.event, ScalarEvent)
                    and not self.analysis.empty and not self.analysis.multiline)
            or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
    def process_anchor(self, indicator):
        """Write the event's anchor prefixed by *indicator* ('&' for an
        anchor definition, '*' for an alias); clears the cached value."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator+self.prepared_anchor, True)
        self.prepared_anchor = None
    def process_tag(self):
        """Write the event's tag when it cannot be left implicit.

        For scalars the decision depends on the chosen style: a plain
        scalar may omit the tag when implicit[0] holds, a quoted one when
        implicit[1] holds; '!' marks a scalar whose plain form would
        otherwise resolve to a different type.
        """
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            if ((not self.canonical or tag is None) and
                ((self.style == '' and self.event.implicit[0])
                        or (self.style != '' and self.event.implicit[1]))):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                tag = u'!'
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError("tag is not specified")
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
        self.prepared_tag = None
    def choose_scalar_style(self):
        """Pick the output style for the current scalar event: '' (plain),
        "'" (single-quoted), '|'/'>' (block), or '"' (double-quoted, the
        universal fallback), honoring the event's requested style where
        the analysis allows it."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        if not self.event.style and self.event.implicit[0]:
            if (not (self.simple_key_context and
                    (self.analysis.empty or self.analysis.multiline))
                and (self.flow_level and self.analysis.allow_flow_plain
                    or (not self.flow_level and self.analysis.allow_block_plain))):
                return ''
        if self.event.style and self.event.style in '|>':
            if (not self.flow_level and not self.simple_key_context
                    and self.analysis.allow_block):
                return self.event.style
        if not self.event.style or self.event.style == '\'':
            if (self.analysis.allow_single_quoted and
                    not (self.simple_key_context and self.analysis.multiline)):
                return '\''
        return '"'
    def process_scalar(self):
        """Write the current scalar in the chosen style and reset the
        cached analysis/style for the next scalar."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # Line splitting is disabled inside simple keys (must stay on one line).
        split = (not self.simple_key_context)
        #if self.analysis.multiline and split    \
        #        and (not self.style or self.style in '\'\"'):
        #    self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == '\'':
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar)
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return u'%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != u'!' or handle[-1] != u'!':
raise EmitterError("tag handle must start and end with '!': %r"
% (handle.encode('utf-8')))
for ch in handle[1:-1]:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch.encode('utf-8'), handle.encode('utf-8')))
return handle
    def prepare_tag_prefix(self, prefix):
        """Percent-encode (UTF-8) any character of a %TAG prefix that is
        not URI-safe; a leading '!' is passed through verbatim."""
        if not prefix:
            raise EmitterError("tag prefix must not be empty")
        chunks = []
        start = end = 0
        if prefix[0] == u'!':
            end = 1
        while end < len(prefix):
            ch = prefix[end]
            if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                    or ch in u'-;/?!:@&=+$,_.~*\'()[]':
                end += 1
            else:
                if start < end:
                    chunks.append(prefix[start:end])
                start = end = end+1
                # NOTE(review): byte-wise iteration with ord() assumes
                # Python 2 str semantics — confirm before porting to py3.
                data = ch.encode('utf-8')
                for ch in data:
                    chunks.append(u'%%%02X' % ord(ch))
        if start < end:
            chunks.append(prefix[start:end])
        return u''.join(chunks)
    def prepare_tag(self, tag):
        """Render *tag* for output: shorten it with a registered %TAG
        handle when one matches, percent-encode unsafe characters, and
        fall back to the verbatim u'!<...>' form when no handle applies."""
        if not tag:
            raise EmitterError("tag must not be empty")
        if tag == u'!':
            return tag
        handle = None
        suffix = tag
        for prefix in self.tag_prefixes:
            if tag.startswith(prefix)   \
                    and (prefix == u'!' or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix):]
        chunks = []
        start = end = 0
        while end < len(suffix):
            ch = suffix[end]
            if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
                    or ch in u'-;/?:@&=+$,_.~*\'()[]'   \
                    or (ch == u'!' and handle != u'!'):
                end += 1
            else:
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end+1
                # NOTE(review): byte-wise ord() loop assumes Python 2 str.
                data = ch.encode('utf-8')
                for ch in data:
                    chunks.append(u'%%%02X' % ord(ch))
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = u''.join(chunks)
        if handle:
            return u'%s%s' % (handle, suffix_text)
        else:
            return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch.encode('utf-8'), anchor.encode('utf-8')))
return anchor
    def analyze_scalar(self, scalar):
        """Scan *scalar* once and return a ScalarAnalysis describing which
        output styles (flow/block plain, single/double-quoted, block) can
        faithfully represent it."""
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
                    allow_flow_plain=False, allow_block_plain=True,
                    allow_single_quoted=True, allow_double_quoted=True,
                    allow_block=False)
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Whitespaces.
        inline_spaces = False          # non-space space+ non-space
        inline_breaks = False          # non-space break+ non-space
        leading_spaces = False         # ^ space+ (non-space | $)
        leading_breaks = False         # ^ break+ (non-space | $)
        trailing_spaces = False        # (^ | non-space) space+ $
        trailing_breaks = False        # (^ | non-space) break+ $
        inline_breaks_spaces = False   # non-space break+ space+ non-space
        mixed_breaks_spaces = False    # anything else
        # Check document indicators.
        if scalar.startswith(u'---') or scalar.startswith(u'...'):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceeded_by_space = True
        # Last character or followed by a whitespace.
        followed_by_space = (len(scalar) == 1 or
                scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
        # The current series of whitespaces contain plain spaces.
        spaces = False
        # The current series of whitespaces contain line breaks.
        breaks = False
        # The current series of whitespaces contain a space followed by a
        # break.
        mixed = False
        # The current series of whitespaces start at the beginning of the
        # scalar.
        leading = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in u'#,[]{}&*!|>\'\"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in u'?:':
                    flow_indicators = True
                    if followed_by_space:
                        block_indicators = True
                if ch == u'-' and followed_by_space:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in u',?[]{}':
                    flow_indicators = True
                if ch == u':':
                    flow_indicators = True
                    if followed_by_space:
                        block_indicators = True
                if ch == u'#' and preceeded_by_space:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in u'\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
                if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
                        or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
                    unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Spaces, line breaks, and how they are mixed. State machine.
            # Start or continue series of whitespaces.
            if ch in u' \n\x85\u2028\u2029':
                if spaces and breaks:
                    if ch != u' ':      # break+ (space+ break+)    => mixed
                        mixed = True
                elif spaces:
                    if ch != u' ':      # (space+ break+)   => mixed
                        breaks = True
                        mixed = True
                elif breaks:
                    if ch == u' ':      # break+ space+
                        spaces = True
                else:
                    leading = (index == 0)
                    if ch == u' ':      # space+
                        spaces = True
                    else:               # break+
                        breaks = True
            # Series of whitespaces ended with a non-space.
            elif spaces or breaks:
                if leading:
                    if spaces and breaks:
                        mixed_breaks_spaces = True
                    elif spaces:
                        leading_spaces = True
                    elif breaks:
                        leading_breaks = True
                else:
                    if mixed:
                        mixed_breaks_spaces = True
                    elif spaces and breaks:
                        inline_breaks_spaces = True
                    elif spaces:
                        inline_spaces = True
                    elif breaks:
                        inline_breaks = True
                spaces = breaks = mixed = leading = False
            # Series of whitespaces reach the end.
            if (spaces or breaks) and (index == len(scalar)-1):
                if spaces and breaks:
                    mixed_breaks_spaces = True
                elif spaces:
                    trailing_spaces = True
                    if leading:
                        leading_spaces = True
                elif breaks:
                    trailing_breaks = True
                    if leading:
                        leading_breaks = True
                spaces = breaks = mixed = leading = False
            # Prepare for the next character.
            index += 1
            preceeded_by_space = (ch in u'\0 \t\r\n\x85\u2028\u2029')
            followed_by_space = (index+1 >= len(scalar) or
                    scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespace are bad for plain scalars. We also
        # do not want to mess with leading whitespaces for block scalars.
        if leading_spaces or leading_breaks or trailing_spaces:
            allow_flow_plain = allow_block_plain = allow_block = False
        # Trailing breaks are fine for block scalars, but unacceptable for
        # plain scalars.
        if trailing_breaks:
            allow_flow_plain = allow_block_plain = False
        # The combination of (space+ break+) is only acceptable for block
        # scalars.
        if inline_breaks_spaces:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Mixed spaces and breaks, as well as special character are only
        # allowed for double quoted scalars.
        if mixed_breaks_spaces or special_characters:
            allow_flow_plain = allow_block_plain =  \
            allow_single_quoted = allow_block = False
        # We don't emit multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(scalar=scalar,
                empty=False, multiline=line_breaks,
                allow_flow_plain=allow_flow_plain,
                allow_block_plain=allow_block_plain,
                allow_single_quoted=allow_single_quoted,
                allow_double_quoted=allow_double_quoted,
                allow_block=allow_block)
# Writers.
    def flush_stream(self):
        """Flush the underlying stream when it supports flushing."""
        if hasattr(self.stream, 'flush'):
            self.stream.flush()
    def write_stream_start(self):
        """Begin the stream; UTF-16 output needs an explicit BOM."""
        # Write BOM if needed.
        if self.encoding and self.encoding.startswith('utf-16'):
            self.stream.write(u'\xFF\xFE'.encode(self.encoding))
    def write_stream_end(self):
        """End the stream: just flush pending output."""
        self.flush_stream()
    def write_indicator(self, indicator, need_whitespace,
            whitespace=False, indention=False):
        """Write a syntax indicator, inserting a separating space when the
        previous character was not whitespace and *need_whitespace* holds;
        updates the column/whitespace/indention bookkeeping."""
        if self.whitespace or not need_whitespace:
            data = indicator
        else:
            data = u' '+indicator
        self.whitespace = whitespace
        self.indention = self.indention and indention
        self.column += len(data)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    def write_indent(self):
        """Move to a fresh line at the current indent level, writing a line
        break only when the cursor is past (or awkwardly at) the indent."""
        indent = self.indent or 0
        if not self.indention or self.column > indent   \
                or (self.column == indent and not self.whitespace):
            self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = u' '*(indent-self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
    def write_line_break(self, data=None):
        """Write a line break (*data*, or the configured best_line_break)
        and reset the per-line cursor state."""
        if data is None:
            data = self.best_line_break
        self.whitespace = True
        self.indention = True
        self.line += 1
        self.column = 0
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    def write_version_directive(self, version_text):
        """Write a '%YAML <version>' directive line."""
        data = u'%%YAML %s' % version_text
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
    def write_tag_directive(self, handle_text, prefix_text):
        """Write a '%TAG <handle> <prefix>' directive line."""
        data = u'%%TAG %s %s' % (handle_text, prefix_text)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        """Write *text* as a single-quoted scalar: quotes are doubled
        (''), line breaks are preserved as folds, and long lines may be
        wrapped at spaces when *split* is true."""
        self.write_indicator(u'\'', True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch is None or ch != u' ':
                    # Fold a single space at a too-long line; otherwise
                    # emit the run of spaces verbatim.
                    if start+1 == end and self.column > self.best_width and split   \
                            and start != 0 and end != len(text):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == u'\'':
                # A single quote is escaped by doubling it.
                data = u'\'\''
                self.column += 2
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = (ch == u' ')
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
        self.write_indicator(u'\'', False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
u'\x07': u'a',
u'\x08': u'b',
u'\x09': u't',
u'\x0A': u'n',
u'\x0B': u'v',
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
u'\"': u'\"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
u'\u2028': u'L',
u'\u2029': u'P',
}
    def write_double_quoted(self, text, split=True):
        """Write *text* as a double-quoted scalar, escaping characters via
        ESCAPE_REPLACEMENTS or \\xXX/\\uXXXX/\\UXXXXXXXX forms; long lines
        may be split with a trailing backslash when *split* is true."""
        self.write_indicator(u'"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
                    or not (u'\x20' <= ch <= u'\x7E'
                        or (self.allow_unicode
                            and (u'\xA0' <= ch <= u'\uD7FF'
                                or u'\uE000' <= ch <= u'\uFFFD'))):
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= u'\xFF':
                        data = u'\\x%02X' % ord(ch)
                    elif ch <= u'\uFFFF':
                        data = u'\\u%04X' % ord(ch)
                    else:
                        data = u'\\U%08X' % ord(ch)
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end+1
            # Split an over-long line with a trailing '\'; a leading space
            # on the continuation must itself be escaped.
            if 0 < end < len(text)-1 and (ch == u' ' or start >= end)   \
                    and self.column+(end-start) > self.best_width and split:
                data = text[start:end]+u'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == u' ':
                    data = u'\\'
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator(u'"', False)
def determine_chomp(self, text):
tail = text[-2:]
while len(tail) < 2:
tail = u' '+tail
if tail[-1] in u'\n\x85\u2028\u2029':
if tail[-2] in u'\n\x85\u2028\u2029':
return u'+'
else:
return u''
else:
return u'-'
    def write_folded(self, text):
        """Emit *text* as a folded block scalar ('>').

        The chomping indicator is chosen by determine_chomp().  Single
        spaces may be replaced by an indented line break when the line
        exceeds ``best_width``; runs of breaks are replayed verbatim.
        """
        chomp = self.determine_chomp(text)
        self.write_indicator(u'>'+chomp, True)
        self.write_indent()
        leading_space = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # End of a run of line breaks: replay them (an extra break
                # protects a following more-indented/space-led line from
                # being folded), then re-indent for the next content line.
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if not leading_space and ch is not None and ch != u' ' \
                            and text[start] == u'\n':
                        self.write_line_break()
                    leading_space = (ch == u' ')
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                # End of a run of spaces: fold here (single space, line too
                # wide) or flush the spaces unchanged.
                if ch != u' ':
                    if start+1 == end and self.column > self.best_width:
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                # End of a run of regular characters: flush it.
                # NOTE(review): self.column is not advanced here (unlike the
                # spaces branch above) — confirm against upstream emitter.
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in u'\n\x85\u2028\u2029')
                spaces = (ch == u' ')
            end += 1
    def write_literal(self, text):
        """Emit *text* as a literal block scalar ('|').

        Content is reproduced exactly: line breaks are replayed one by
        one and each content line is re-indented; no folding occurs.
        """
        chomp = self.determine_chomp(text)
        self.write_indicator(u'|'+chomp, True)
        self.write_indent()
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # End of a run of breaks: replay them, then re-indent
                # before the next content line (if any).
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            else:
                # End of a run of content characters: write it verbatim.
                if ch is None or ch in u'\n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
def write_plain(self, text, split=True):
if not text:
return
if not self.whitespace:
data = u' '
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.writespace = False
self.indention = False
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch != u' ':
if start+1 == end and self.column > self.best_width and split:
self.write_indent()
self.writespace = False
self.indention = False
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch not in u'\n\x85\u2028\u2029':
if text[start] == u'\n':
self.write_line_break()
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
self.whitespace = False
self.indention = False
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
spaces = (ch == u' ')
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
| apache-2.0 |
makinacorpus/Geotrek | geotrek/maintenance/forms.py | 1 | 7525 | from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.forms import FloatField
from django.utils.translation import gettext_lazy as _
from django.forms.models import inlineformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Fieldset, Layout, Div, HTML
from geotrek.common.forms import CommonForm
from geotrek.core.fields import TopologyField
from geotrek.core.models import Topology
from .models import Intervention, Project
class ManDayForm(forms.ModelForm):
    """Inline form for a single man-day entry (number of days for a job)."""

    class Meta:
        fields = ('id', 'nb_days', 'job')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Crispy helper: no surrounding <form> tag, fields laid out flat.
        helper = FormHelper()
        helper.form_tag = False
        helper.layout = Layout('id', 'nb_days', 'job')
        self.helper = helper
        # Compact rendering inside the formset table.
        nb_days = self.fields['nb_days']
        nb_days.widget.attrs['placeholder'] = _('Days')
        nb_days.label = ''
        nb_days.widget.attrs['class'] = 'input-mini'
        self.fields['job'].widget.attrs['class'] = 'input-medium'
# Inline formset of man-days attached to an Intervention (through model of
# Intervention.jobs); one extra empty row is rendered for adding an entry.
ManDayFormSet = inlineformset_factory(Intervention, Intervention.jobs.through, form=ManDayForm, extra=1)
class FundingForm(forms.ModelForm):
    """Inline form for a single funding line (amount given by an organism)."""

    class Meta:
        fields = ('id', 'amount', 'organism')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Crispy helper: no surrounding <form> tag, fields laid out flat.
        helper = FormHelper()
        helper.form_tag = False
        helper.layout = Layout('id', 'amount', 'organism')
        self.helper = helper
        self.fields['organism'].widget.attrs['class'] = 'input-xlarge'
# Inline formset of fundings attached to a Project (through model of
# Project.founders); one extra empty row is rendered for adding an entry.
FundingFormSet = inlineformset_factory(Project, Project.founders.through, form=FundingForm, extra=1)
class InterventionForm(CommonForm):
    """An intervention can be a Point or a Line.

    The geometry is edited through a ``topology`` field; the target may
    be an existing topology (path, trek...) or a standalone topology of
    kind INTERVENTION created for this intervention.
    """
    topology = TopologyField(label="")
    length = FloatField(required=False, label=_("Length"))
    project = forms.ModelChoiceField(required=False, label=_("Project"),
                                     queryset=Project.objects.existing())
    geomfields = ['topology']
    leftpanel_scrollable = False
    # Two-tab crispy layout: "Main" (descriptive fields) and "Advanced"
    # (costs and man-days formset, injected by the template).
    fieldslayout = [
        Div(
            HTML("""
        <ul class="nav nav-tabs">
            <li id="tab-main" class="active"><a href="#main" data-toggle="tab"><i class="icon-certificate"></i> %s</a></li>
            <li id="tab-advanced"><a href="#advanced" data-toggle="tab"><i class="icon-tasks"></i> %s</a></li>
        </ul>""" % (_("Main"), _("Advanced"))),
            Div(
                Div(
                    'structure',
                    'name',
                    'date',
                    'status',
                    'disorders',
                    'type',
                    'subcontracting',
                    'length',
                    'width',
                    'height',
                    'stake',
                    'project',
                    'description',
                    css_id="main",
                    css_class="tab-pane active"
                ),
                Div(
                    'material_cost',
                    'heliport_cost',
                    'subcontract_cost',
                    Fieldset(_("Mandays")),
                    css_id="advanced",  # used in Javascript for activating tab if error
                    css_class="tab-pane"
                ),
                css_class="scrollable tab-content"
            ),
            css_class="tabbable"
        ),
    ]
    class Meta(CommonForm.Meta):
        model = Intervention
        fields = CommonForm.Meta.fields + \
            ['structure', 'name', 'date', 'status', 'disorders', 'type', 'description', 'subcontracting', 'length', 'width',
             'height', 'stake', 'project', 'material_cost', 'heliport_cost', 'subcontract_cost', 'topology']
    def __init__(self, *args, target_type=None, target_id=None, **kwargs):
        """Bind the intervention to its target topology.

        For a new intervention, *target_type*/*target_id* (content type
        id / object id) point the intervention at an existing topology;
        without them a fresh INTERVENTION topology is created.
        """
        super().__init__(*args, **kwargs)
        if not self.instance.pk:
            # New intervention. We have to set its target.
            if target_type and target_id:
                # Point target to an existing topology
                ct = ContentType.objects.get_for_id(target_type)
                self.instance.target = ct.get_object_for_this_type(id=target_id)
                # Set POST URL
                self.helper.form_action += '?target_type={}&target_id={}'.format(target_type, target_id)
            else:
                # Point target to a new topology
                self.instance.target = Topology(kind='INTERVENTION')
        # Else: existing intervention. Target is already set
        self.fields['topology'].initial = self.instance.target
        if self.instance.target.__class__ == Topology:
            # Intervention has its own topology
            # NOTE(review): the string is formatted *before* the gettext
            # lookup, so "On Paths" cannot match a translation catalog
            # entry; likely intended _("On {}").format(...) — confirm.
            title = _("On {}".format(_("Paths")))
            self.fields['topology'].label = \
                '<img src="{prefix}images/path-16.png" title="{title}">{title}'.format(
                    prefix=settings.STATIC_URL, title=title
                )
        else:
            # Intervention on an existing topology
            icon = self.instance.target._meta.model_name
            title = _("On {}".format(str(self.instance.target)))
            self.fields['topology'].label = \
                '<img src="{prefix}images/{icon}-16.png" title="{title}"><a href="{url}">{title}</a>'.format(
                    prefix=settings.STATIC_URL, icon=icon, title=title,
                    url=self.instance.target.get_detail_url()
                )
            # Topology is readonly
            del self.fields['topology']
        # Length is not editable in AltimetryMixin
        self.fields['length'].initial = self.instance.length
        # NOTE(review): 'readonly' is assigned the value of 'editable' —
        # the names suggest the inverse relation; confirm intended
        # behaviour before changing.
        editable = bool(self.instance.geom and (self.instance.geom.geom_type == 'Point'
                        or self.instance.geom.geom_type == 'LineString'))
        self.fields['length'].widget.attrs['readonly'] = editable
    def save(self, *args, **kwargs):
        """Persist the target topology first, then the intervention.

        A changed topology drawn in the form mutates the existing target
        in place so its primary key (and references) are preserved.
        """
        target = self.instance.target
        if not target.pk:
            target.save()
        topology = self.cleaned_data.get('topology')
        if topology and topology.pk != target.pk:
            target.mutate(topology)
        intervention = super().save(*args, **kwargs, commit=False)
        intervention.target = target
        intervention.save()
        self.save_m2m()
        return intervention
class ProjectForm(CommonForm):
    """Creation/edition form for a Project.

    Two side-by-side columns: general information on the left,
    stakeholders plus the fundings formset (injected by the template
    into the "Fundings" fieldset) on the right.
    """
    fieldslayout = [
        Div(
            Div(
                Div('structure',
                    'name',
                    'type',
                    'domain',
                    'begin_year',
                    'end_year',
                    'constraint',
                    'global_cost',
                    'comments',
                    css_class="span6"),
                Div('project_owner',
                    'project_manager',
                    'contractors',
                    Fieldset(_("Fundings")),
                    css_class="span6"),
                css_class="row-fluid"
            ),
            css_class="container-fluid"
        ),
    ]
    class Meta(CommonForm.Meta):
        model = Project
        fields = CommonForm.Meta.fields + \
            ['structure', 'name', 'type', 'domain', 'begin_year', 'end_year', 'constraint',
             'global_cost', 'comments', 'project_owner', 'project_manager', 'contractors']
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): form tag disabled, presumably because the template
        # wraps this form together with the fundings formset — confirm.
        self.helper.form_tag = False
| bsd-2-clause |
dbaxa/django | django/contrib/admin/actions.py | 395 | 3316 | """
Built-in, globally-available admin actions.
"""
from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.admin.utils import get_deleted_objects, model_ngettext
from django.core.exceptions import PermissionDenied
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _, ugettext_lazy
def delete_selected(modeladmin, request, queryset):
    """
    Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects, or, if the user has no permission on one of the related
    children (foreignkeys), a "permission denied" message.
    Next, it deletes all selected objects and redirects back to the change list.
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    # Check that the user has delete permission for the actual model
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied
    using = router.db_for_write(modeladmin.model)
    # Populate deletable_objects, a data structure of all related objects that
    # will also be deleted.
    deletable_objects, model_count, perms_needed, protected = get_deleted_objects(
        queryset, opts, request.user, modeladmin.admin_site, using)
    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post'):
        if perms_needed:
            raise PermissionDenied
        n = queryset.count()
        if n:
            # Log each deletion individually before the bulk delete.
            for obj in queryset:
                obj_display = force_text(obj)
                modeladmin.log_deletion(request, obj, obj_display)
            queryset.delete()
            modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(modeladmin.opts, n)
            }, messages.SUCCESS)
        # Return None to display the change list page again.
        return None
    if len(queryset) == 1:
        objects_name = force_text(opts.verbose_name)
    else:
        objects_name = force_text(opts.verbose_name_plural)
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")
    context = dict(
        modeladmin.admin_site.each_context(request),
        title=title,
        objects_name=objects_name,
        deletable_objects=[deletable_objects],
        model_count=dict(model_count).items(),
        queryset=queryset,
        perms_lacking=perms_needed,
        protected=protected,
        opts=opts,
        action_checkbox_name=helpers.ACTION_CHECKBOX_NAME,
    )
    request.current_app = modeladmin.admin_site.name
    # Display the confirmation page
    return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
    ], context)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| bsd-3-clause |
veroc/Bika-LIMS | bika/lims/exportimport/instruments/thermoscientific/arena/xt20.py | 3 | 3232 | """ Thermo Scientific 'Arena 20XT' (The file name for importing staff)
"""
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from . import ThermoArenaImporter, ThermoArenaRPRCSVParser
import json
import traceback
title = "Thermo Scientific - Arena 20XT"
def Import(context, request):
    """ Thermo Scientific - Arena 20XT analysis results

    Reads the uploaded results file and options from the request form,
    parses it with the format-specific parser and feeds the results to
    the importer.  Returns a JSON string with 'errors', 'log' and
    'warns' lists.
    """
    infile = request.form['thermoscientific_arena_20XT_file']
    fileformat = request.form['thermoscientific_arena_20XT_format']
    artoapply = request.form['thermoscientific_arena_20XT_artoapply']
    override = request.form['thermoscientific_arena_20XT_override']
    sample = request.form.get('thermoscientific_arena_20XT_sample',
                              'requestid')
    instrument = request.form.get('thermoscientific_arena_20XT_instrument', None)
    errors = []
    logs = []
    warns = []
    # Load the parser matching the selected file format.
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'rpr.csv':
        parser = ThermoArena20XTRPRCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))
    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        # BUGFIX (consistency): this chain mixed 'if' and 'elif'; made a
        # single elif chain — same behaviour, branches are exclusive.
        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        elif sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']
        importer = ThermoArena20XTImporter(parser=parser,
                                           context=context,
                                           idsearchcriteria=sam,
                                           allowed_ar_states=status,
                                           allowed_analysis_states=None,
                                           override=over,
                                           instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            # BUGFIX: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt.  The traceback is still
            # captured and reported in the returned errors.
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)
    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
class ThermoArena20XTImporter(ThermoArenaImporter):
    """Results importer for the Arena 20XT; generic Thermo Arena importer
    behaviour, with no excluded keywords."""
    def getKeywordsToBeExcluded(self):
        # No analysis service keywords are excluded for this instrument.
        return []
class ThermoArena20XTRPRCSVParser(ThermoArenaRPRCSVParser):
    """RPR CSV results parser for the Arena 20XT."""
    def getAttachmentFileType(self):
        # Human-readable label used when attaching the raw results file.
        # BUGFIX: removed extraction residue ('| agpl-3.0 |') that had been
        # fused onto this line, making it a syntax error.
        return "Thermo Scientific Arena 20XT RPR.CSV"
vineodd/PIMSim | GEM5Simulation/gem5/util/stats/dbinit.py | 90 | 15861 | # Copyright (c) 2003-2004 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import MySQLdb
class MyDB(object):
def __init__(self, options):
self.name = options.db
self.host = options.host
self.user = options.user
self.passwd = options.passwd
self.mydb = None
self.cursor = None
    def admin(self):
        """Open an administrative connection to the 'mysql' system database
        (used by drop()/create() to manage the stats database itself).
        Any existing connection is closed first."""
        self.close()
        self.mydb = MySQLdb.connect(db='mysql', host=self.host, user=self.user,
                                    passwd=self.passwd)
        self.cursor = self.mydb.cursor()
    def connect(self):
        """Open a connection to the stats database given at construction.
        Any existing connection is closed first."""
        self.close()
        self.mydb = MySQLdb.connect(db=self.name, host=self.host,
                                    user=self.user, passwd=self.passwd)
        self.cursor = self.mydb.cursor()
def close(self):
if self.mydb is not None:
self.mydb.close()
self.cursor = None
    def query(self, sql):
        """Execute *sql* on the current cursor (connect() or admin() must
        have been called first)."""
        self.cursor.execute(sql)
    def drop(self):
        """Drop the stats database if it exists.  The database name is
        interpolated directly into the SQL; it comes from local options,
        not untrusted input."""
        self.query('DROP DATABASE IF EXISTS %s' % self.name)
    def create(self):
        """Create the (empty) stats database; populate() builds the schema."""
        self.query('CREATE DATABASE %s' % self.name)
    def populate(self):
        """Create the full schema for the stats database (InnoDB tables:
        runs, stats, data, subdata, formulas, formula_ref, events and
        event_names).  Requires an open connection (see connect())."""
        #
        # Each run (or simulation) gets its own entry in the runs table to
        # group stats by where they were generated
        #
        # COLUMNS:
        #   'id' is a unique identifier for each run to be used in other
        #       tables.
        #   'name' is the user designated name for the data generated.  It is
        #       configured in the simulator.
        #   'user' identifies the user that generated the data for the given
        #       run.
        #   'project' another name to identify runs for a specific goal
        #   'date' is a timestamp for when the data was generated.  It can be
        #       used to easily expire data that was generated in the past.
        #   'expire' is a timestamp for when the data should be removed from
        #       the database so we don't have years worth of junk.
        #
        # INDEXES:
        #   'run' is indexed so you can find out details of a run if the run
        #       was retreived from the data table.
        #   'name' is indexed so that two all run names are forced to be unique
        #
        self.query('''
        CREATE TABLE runs(
            rn_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
            rn_name VARCHAR(200) NOT NULL,
            rn_sample VARCHAR(32) NOT NULL,
            rn_user VARCHAR(32) NOT NULL,
            rn_project VARCHAR(100) NOT NULL,
            rn_date TIMESTAMP NOT NULL,
            rn_expire TIMESTAMP NOT NULL,
            PRIMARY KEY (rn_id),
            UNIQUE (rn_name,rn_sample)
        ) TYPE=InnoDB''')
        #
        # The stat table gives us all of the data for a particular stat.
        #
        # COLUMNS:
        #   'stat' is a unique identifier for each stat to be used in other
        #       tables for references.
        #   'name' is simply the simulator derived name for a given
        #       statistic.
        #   'descr' is the description of the statistic and what it tells
        #       you.
        #   'type' defines what the stat tells you.  Types are:
        #       SCALAR: A simple scalar statistic that holds one value
        #       VECTOR: An array of statistic values.  Such a something that
        #           is generated per-thread.  Vectors exist to give averages,
        #            pdfs, cdfs, means, standard deviations, etc across the
        #           stat values.
        #       DIST: Is a distribution of data.  When the statistic value is
        #           sampled, its value is counted in a particular bucket.
        #           Useful for keeping track of utilization of a resource.
        #           (e.g. fraction of time it is 25% used vs. 50% vs. 100%)
        #       VECTORDIST: Can be used when the distribution needs to be
        #           factored out into a per-thread distribution of data for
        #           example.  It can still be summed across threads to find
        #           the total distribution.
        #       VECTOR2D: Can be used when you have a stat that is not only
        #           per-thread, but it is per-something else.  Like
        #           per-message type.
        #       FORMULA: This statistic is a formula, and its data must be
        #           looked up in the formula table, for indicating how to
        #           present its values.
        #   'subdata' is potentially used by any of the vector types to
        #       give a specific name to all of the data elements within a
        #       stat.
        #   'print' indicates whether this stat should be printed ever.
        #       (Unnamed stats don't usually get printed)
        #   'prereq' only print the stat if the prereq is not zero.
        #   'prec' number of decimal places to print
        #   'nozero' don't print zero values
        #   'nonan' don't print NaN values
        #   'total' for vector type stats, print the total.
        #   'pdf' for vector type stats, print the pdf.
        #   'cdf' for vector type stats, print the cdf.
        #
        # The Following are for dist type stats:
        #   'min' is the minimum bucket value. Anything less is an underflow.
        #   'max' is the maximum bucket value. Anything more is an overflow.
        #   'bktsize' is the approximate number of entries in each bucket.
        #   'size' is the number of buckets. equal to (min/max)/bktsize.
        #
        # INDEXES:
        #   'stat' is indexed so that you can find out details about a stat
        #       if the stat id was retrieved from the data table.
        #   'name' is indexed so that you can simply look up data about a
        #       named stat.
        #
        self.query('''
        CREATE TABLE stats(
            st_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
            st_name VARCHAR(255) NOT NULL,
            st_descr TEXT NOT NULL,
            st_type ENUM("SCALAR", "VECTOR", "DIST", "VECTORDIST",
                "VECTOR2D", "FORMULA") NOT NULL,
            st_print BOOL NOT NULL,
            st_prereq SMALLINT UNSIGNED NOT NULL,
            st_prec TINYINT NOT NULL,
            st_nozero BOOL NOT NULL,
            st_nonan BOOL NOT NULL,
            st_total BOOL NOT NULL,
            st_pdf BOOL NOT NULL,
            st_cdf BOOL NOT NULL,
            st_min DOUBLE NOT NULL,
            st_max DOUBLE NOT NULL,
            st_bktsize DOUBLE NOT NULL,
            st_size SMALLINT UNSIGNED NOT NULL,
            PRIMARY KEY (st_id),
            UNIQUE (st_name)
        ) TYPE=InnoDB''')
        #
        # This is the main table of data for stats.
        #
        # COLUMNS:
        #   'stat' refers to the stat field given in the stat table.
        #
        #   'x' referrs to the first dimension of a multi-dimensional stat. For
        #       a vector, x will start at 0 and increase for each vector
        #       element.
        #       For a distribution:
        #       -1: sum (for calculating standard deviation)
        #       -2: sum of squares (for calculating standard deviation)
        #       -3: total number of samples taken (for calculating
        #           standard deviation)
        #       -4: minimum value
        #       -5: maximum value
        #       -6: underflow
        #       -7: overflow
        #   'y' is used by a VECTORDIST and the VECTOR2D to describe the second
        #       dimension.
        #   'run' is the run that the data was generated from.  Details up in
        #       the run table
        #   'tick' is a timestamp generated by the simulator.
        #   'data' is the actual stat value.
        #
        # INDEXES:
        #   'stat' is indexed so that a user can find all of the data for a
        #       particular stat. It is not unique, because that specific stat
        #       can be found in many runs and samples, in addition to
        #       having entries for the mulidimensional cases.
        #   'run' is indexed to allow a user to remove all of the data for a
        #       particular execution run.  It can also be used to allow the
        #       user to print out all of the data for a given run.
        #
        self.query('''
        CREATE TABLE data(
            dt_stat SMALLINT UNSIGNED NOT NULL,
            dt_x SMALLINT NOT NULL,
            dt_y SMALLINT NOT NULL,
            dt_run SMALLINT UNSIGNED NOT NULL,
            dt_tick BIGINT UNSIGNED NOT NULL,
            dt_data DOUBLE NOT NULL,
            INDEX (dt_stat),
            INDEX (dt_run),
            UNIQUE (dt_stat,dt_x,dt_y,dt_run,dt_tick)
        ) TYPE=InnoDB;''')
        #
        # Names and descriptions for multi-dimensional stats (vectors, etc.)
        # are stored here instead of having their own entry in the statistics
        # table. This allows all parts of a single stat to easily share a
        # single id.
        #
        # COLUMNS:
        #   'stat' is the unique stat identifier from the stat table.
        #   'x' is the first dimension for multi-dimensional stats
        #       corresponding to the data table above.
        #   'y' is the second dimension for multi-dimensional stats
        #       corresponding to the data table above.
        #   'name' is the specific subname for the unique stat,x,y combination.
        #   'descr' is the specific description for the uniqe stat,x,y
        #       combination.
        #
        # INDEXES:
        #   'stat' is indexed so you can get the subdata for a specific stat.
        #
        self.query('''
        CREATE TABLE subdata(
            sd_stat SMALLINT UNSIGNED NOT NULL,
            sd_x SMALLINT NOT NULL,
            sd_y SMALLINT NOT NULL,
            sd_name VARCHAR(255) NOT NULL,
            sd_descr TEXT,
            UNIQUE (sd_stat,sd_x,sd_y)
        ) TYPE=InnoDB''')
        #
        # The formula table is maintained separately from the data table
        # because formula data, unlike other stat data cannot be represented
        # there.
        #
        # COLUMNS:
        #   'stat' refers to the stat field generated in the stat table.
        #   'formula' is the actual string representation of the formula
        #       itself.
        #
        # INDEXES:
        #   'stat' is indexed so that you can just look up a formula.
        #
        self.query('''
        CREATE TABLE formulas(
            fm_stat SMALLINT UNSIGNED NOT NULL,
            fm_formula BLOB NOT NULL,
            PRIMARY KEY(fm_stat)
        ) TYPE=InnoDB''')
        #
        # Each stat used in each formula is kept in this table.  This way, if
        # you want to print out a particular formula, you can simply find out
        # which stats you need by looking in this table.  Additionally, when
        # you remove a stat from the stats table and data table, you remove
        # any references to the formula in this table.  When a formula is no
        # longer referred to, you remove its entry.
        #
        # COLUMNS:
        #   'stat' is the stat id from the stat table above.
        #   'child' is the stat id of a stat that is used for this formula.
        #       There may be many children for any given 'stat' (formula)
        #
        # INDEXES:
        #   'stat' is indexed so you can look up all of the children for a
        #       particular stat.
        #   'child' is indexed so that you can remove an entry when a stat is
        #       removed.
        #
        self.query('''
        CREATE TABLE formula_ref(
            fr_stat SMALLINT UNSIGNED NOT NULL,
            fr_run SMALLINT UNSIGNED NOT NULL,
            UNIQUE (fr_stat,fr_run),
            INDEX (fr_stat),
            INDEX (fr_run)
        ) TYPE=InnoDB''')
        # COLUMNS:
        #   'event' is the unique event id from the event_desc table
        #   'run' is simulation run id that this event took place in
        #   'tick' is the tick when the event happened
        #
        # INDEXES:
        #   'event' is indexed so you can look up all occurences of a
        #       specific event
        #   'run' is indexed so you can find all events in a run
        #   'tick' is indexed because we want the unique thing anyway
        #   'event,run,tick' is unique combination
        self.query('''
        CREATE TABLE events(
            ev_event SMALLINT UNSIGNED NOT NULL,
            ev_run SMALLINT UNSIGNED NOT NULL,
            ev_tick BIGINT UNSIGNED NOT NULL,
            INDEX(ev_event),
            INDEX(ev_run),
            INDEX(ev_tick),
            UNIQUE(ev_event,ev_run,ev_tick)
        ) TYPE=InnoDB''')
        # COLUMNS:
        #   'id' is the unique description id
        #   'name' is the name of the event that occurred
        #
        # INDEXES:
        #   'id' is indexed because it is the primary key and is what you use
        #       to look up the descriptions
        #   'name' is indexed so one can find the event based on name
        #
        self.query('''
        CREATE TABLE event_names(
            en_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
            en_name VARCHAR(255) NOT NULL,
            PRIMARY KEY (en_id),
            UNIQUE (en_name)
        ) TYPE=InnoDB''')
    def clean(self):
        """Garbage-collect orphaned rows, in dependency order: data and
        formula references for missing runs, unreferenced formulas,
        stats/subdata with no data, then events for missing runs and
        event names with no remaining events."""
        self.query('''
        DELETE data
        FROM data
        LEFT JOIN runs ON dt_run=rn_id
        WHERE rn_id IS NULL''')
        self.query('''
        DELETE formula_ref
        FROM formula_ref
        LEFT JOIN runs ON fr_run=rn_id
        WHERE rn_id IS NULL''')
        self.query('''
        DELETE formulas
        FROM formulas
        LEFT JOIN formula_ref ON fm_stat=fr_stat
        WHERE fr_stat IS NULL''')
        self.query('''
        DELETE stats
        FROM stats
        LEFT JOIN data ON st_id=dt_stat
        WHERE dt_stat IS NULL''')
        self.query('''
        DELETE subdata
        FROM subdata
        LEFT JOIN data ON sd_stat=dt_stat
        WHERE dt_stat IS NULL''')
        self.query('''
        DELETE events
        FROM events
        LEFT JOIN runs ON ev_run=rn_id
        WHERE rn_id IS NULL''')
        self.query('''
        DELETE event_names
        FROM event_names
        LEFT JOIN events ON en_id=ev_event
        WHERE ev_event IS NULL''')
| gpl-3.0 |
Pauliecoon/android_kernel_motorola_shamu_benzoCore | scripts/build-all.py | 236 | 11419 | #! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
    """Write a formatted error message to stderr (does not exit)."""
    sys.stderr.write("error: {0}\n".format(msg))
def fail(msg):
    """Report *msg* via error() and abort with exit status 1."""
    error(msg)
    # sys.exit(1) simply raises SystemExit(1); raise it directly.
    raise SystemExit(1)
# Abort immediately if the (32-bit) cross compiler prefix is not configured;
# every target build depends on it.  CROSS_COMPILE64 is checked lazily, only
# when a 64-bit target is actually built.
if not os.environ.get('CROSS_COMPILE'):
    fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # Both marker files must be present for an MSM kernel tree.
    markers = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    if not all(os.path.isfile(marker) for marker in markers):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError:
            # BUGFIX: the previous handler referenced errno.EEXIST without
            # importing errno, raising NameError whenever makedirs failed.
            # Ignore the failure only if the directory now exists (e.g. it
            # was created concurrently); re-raise anything else.
            if not os.path.isdir(build_dir):
                raise
failed_targets = []
class LogRunner:
    """Runs a subprocess, teeing its combined stdout/stderr into a log
    file; echoes output to stdout when --verbose, otherwise prints one
    progress dot per output line."""
    def __init__(self, logname, make_env):
        self.logname = logname
        self.fd = open(logname, 'w')
        self.make_env = make_env
    def run(self, args):
        """Run *args* with the stored environment and return its exit
        status; output is logged (and echoed or dotted) as it arrives."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                                env=self.make_env,
                                bufsize=0,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read the raw fd directly so output appears promptly even when
        # the child does not emit newline-terminated chunks.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per output line, wrapping after 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.flush()
        return result
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
def build(self):
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
print 'Building %s in %s log %s' % (self.name, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
# shutil.copyfile(defconfig, dotconfig) # Not really right.
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
with open('/dev/null', 'r') as devnull:
subprocess.check_call(['make', 'O=%s' % dest_dir,
self.confname], env=self.make_env,
stdin=devnull)
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
build = LogRunner(log_name, self.make_env)
for t in build_targets:
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(t, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
with open('/dev/null', 'r') as devnull:
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env, stdin=devnull)
shutil.copyfile(savedefconfig, defconfig)
def update_config(file, str):
    """Append one config option line (e.g. "CONFIG_FOO=y") to a defconfig file."""
    # NOTE(review): parameters shadow the "file" and "str" builtins; kept
    # as-is for compatibility.
    print 'Updating %s with \'%s\'\n' % (file, str)
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    builders = []

    def add_builders(config_dir, patterns, suffix=''):
        # One Builder per defconfig matching any glob pattern; the target
        # name is the file name minus the trailing "_defconfig".
        for pattern in patterns:
            for config_path in glob.glob(config_dir + pattern):
                target = os.path.basename(config_path)[:-10] + suffix
                builders.append(Builder(target, config_path))

    add_builders('arch/arm/configs/', (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'mdm*_defconfig',
        r'mpq*_defconfig',
    ))
    # 64-bit targets are only scanned when a 64-bit cross compiler is
    # available; their names get a "-64" suffix to stay distinct.
    if 'CROSS_COMPILE64' in os.environ:
        add_builders('arch/arm64/configs/', (r'msm_defconfig',), suffix='-64')
    return builders
def build_many(targets):
    """Build each Builder in "targets", optionally updating configs first.

    In --keep-going mode failures accumulate in the global
    "failed_targets" list; this aborts via fail() at the end if any
    target failed.
    """
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            # Append the requested option to the source defconfig before
            # configuring/building this target.
            update_config(target.defconfig, all_options.updateconfigs)
        target.build()
    if failed_targets:
        fail("\n ".join(["Failed targets:"] +
             [target.name for target in failed_targets]))
def main():
    """Entry point: parse command-line options, select targets, build them."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
                      dest='configs',
                      help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
                      dest='list',
                      help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose',
                      help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
                      dest='oldconfig',
                      help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
                      dest='updateconfigs',
                      help="Update defconfigs with provided option setting, "
                      "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
                      help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
                      dest='load_average',
                      help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
                      dest='keep_going', default=False,
                      help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
                      help='Build the indicated make target (default: %s)' %
                      ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Reports and builders read options through this module-level global.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs:
            print " %s" % target.name
        sys.exit(0)
    # --oldconfig overrides any -m make targets.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    # -j / -l are forwarded to make as flags.
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    # Positional args select the target set: all / perf / noperf /
    # explicit target names.
    if args == ['all']:
        build_many(configs)
    elif args == ['perf']:
        targets = []
        for t in configs:
            if "perf" in t.name:
                targets.append(t)
        build_many(targets)
    elif args == ['noperf']:
        targets = []
        for t in configs:
            if "perf" not in t.name:
                targets.append(t)
        build_many(targets)
    elif len(args) > 0:
        all_configs = {}
        for t in configs:
            all_configs[t.name] = t
        targets = []
        for t in args:
            if t not in all_configs:
                parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
            targets.append(all_configs[t])
        build_many(targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-2.0 |
peterbraden/peterbraden.github.com | scripts/accountant.py | 1 | 13563 | """
Accountant
----------
A commandline utility for managing finance and accounting.
Concepts:
Transaction - double entry accounting.
Statement - Balance of assets at a point in time, for verification purposes
Account - superclass of Equity, Assets, Liabilities, Expenses, Income
TODO:
Sort out difference between 'my' accounts and other accounts. Show income/outgoings
Insert Account object into each transaction instead of key
"""
import datetime
import optparse
import bisect
import sys
from decimal import Decimal
accounts = {}
ordered_objects = []
reports = []
#========= DATA TYPES =============
class Statement():
    """A point-in-time balance assertion for an account.

    Creating a Statement auto-registers the account (if unseen) and
    inserts the statement into the global date-ordered "ordered_objects"
    list so reports can replay events chronologically.
    """
    def __init__(self, acct, date, currency, balance, *args, **kwargs):
        self.date = datetime.datetime.strptime(date, '%Y-%m-%d')
        # Normalize to two decimal places via a string round-trip.
        self.balance = Decimal('%.2f' % balance)
        self.account = acct
        self.currency = currency
        if acct not in accounts:
            # First sighting of this account id: register a stub Account.
            accounts[acct] = Account(acct, acct, currency = currency)
        self.account = accounts[acct]  # replace the raw id with the Account
        # The '1s' suffix sorts statements after same-day transactions ('0t').
        self.key = '%s%s' % (self.date.strftime('%Y-%m-%d'), '1s')
        bisect.insort(ordered_objects, (self.key, self))
        #accountant.add_statement(self)

    def will_fire(*args, **kwargs):
        # Called as instance.will_fire(...); "self" arrives via *args.
        # Plain statements always participate in reports.
        return True
class Account():
    """A named ledger account, registered in the module-level ``accounts`` map."""

    def __init__(self, id, name, *args, **kwargs):
        """Create the account and register it under its id.

        Keyword args: owner (default ''), currency (default '?', meaning
        unknown until a transaction fills it in).
        """
        self.id = id
        self.name = name
        self.balance = 0
        # Falsy values (None, '') collapse to the defaults, exactly like
        # the ``value or default`` idiom.
        owner = kwargs.get('owner')
        self.owner = owner if owner else ''
        currency = kwargs.get('currency')
        self.currency = currency if currency else '?'
        accounts[self.id] = self
class Transaction():
    """A double-entry transfer of ``amount`` from ``src`` to ``dest``.

    Auto-registers unseen accounts and inserts itself into the global
    chronologically-ordered event list.
    """
    def __init__(self, src, dest, date, currency, amount, category = None, *args, **kwargs):
        self.dest_key = dest
        self.date = datetime.datetime.strptime(date, '%Y-%m-%d')
        self.currency = currency
        # Normalize to two decimal places via a string round-trip.
        self.amount = Decimal('%.2f' % amount)
        self.category = category
        if src not in accounts:
            accounts[src] = Account(src, src, currency = self.currency)
        if dest not in accounts:
            accounts[dest] = Account(dest, dest, currency = self.currency)
        self.source = accounts[src]
        self.dest = accounts[dest]
        # Hack
        # Back-fill the currency of accounts created with unknown ('?')
        # currency from this transaction's currency.
        if self.source.currency == '?':
            self.source.currency = self.currency
        if self.dest.currency == '?':
            self.dest.currency = self.currency
        # The '0t' suffix sorts transactions before same-day statements ('1s').
        self.key = '%s%s' % (self.date.strftime('%Y-%m-%d'), '0t')
        bisect.insort(ordered_objects, (self.key, self))
        #accountant.add_transaction(self)

    def will_fire(*args, **kwargs):
        # Called as instance.will_fire(...); "self" arrives via *args.
        # Plain transactions always participate in reports.
        return True

    def __str__(self):
        return "Transaction('%s', '%s', '%s', '%s', %s)" %(
            self.source.id,
            self.dest.id,
            self.date.strftime('%Y-%m-%d'),
            self.currency,
            self.amount)
#Temp Aliases
class BankAccount(Account):
    """Temporary alias for Account until bank accounts need distinct behavior."""
    pass

class Loan(Account):
    """Temporary alias for Account until loans need distinct behavior."""
    pass
class CurrencyChg(object):
    """A currency exchange modeled as two transactions through the
    intermediate 'curr_change' account: src pays source_amount in the
    source currency, dest receives dest_amount in the dest currency."""
    def __init__(self, src, dest, date, source_currency, source_amount, dest_currency, dest_amount, category = None, *args, **kwargs):
        Transaction(src, 'curr_change', date, source_currency, source_amount, category, *args, **kwargs)
        Transaction('curr_change', dest, date, dest_currency, dest_amount, category, *args, **kwargs)
class OtherCompaniesAccounts(object):#TODO! fix
    """Bulk-register external accounts, each with a zero opening statement."""
    def __init__(self, accounts):
        # NOTE(review): the "accounts" parameter shadows the module-level
        # registry of the same name.
        for i in accounts:
            Account(i,i)
            Statement(i, '2005-10-01', '?', 0)
###====
class ProjectedTransaction(Transaction):
    """A future/estimated transaction, only included in reports that run
    with projection=True (the -p command-line flag)."""
    def will_fire(*args, **kwargs):
        # Called as instance.will_fire(...); fires only in projection mode.
        return kwargs.get('projection')
###===
class Report(object):
    """Base class for reports: filters the event stream and dispatches
    Transaction/Statement events to subclass hooks.

    Filters: ``account`` (either side of a transaction, or the
    statement's account), ``owner`` (statement account's owner),
    ``category`` (transaction category).  Extra args/kwargs are forwarded
    to each object's will_fire() so projections can be toggled per run.
    """
    def __init__(self, account = None, owner = None, category= None, *args, **kwargs):
        self.account = account
        self.owner = owner
        self.category = category
        self.args = args
        self.kwargs = kwargs

    def object_event(self, object):
        """Route one event through the filters to the subclass hooks."""
        if not object.will_fire(*self.args, **self.kwargs):
            return
        if isinstance(object, Transaction):
            if self.account and (object.source.id != self.account and object.dest.id != self.account):
                return
            if self.category and object.category != self.category:
                return
            self.transaction_event(object)
        elif isinstance(object, Statement):
            if self.owner and not (object.account.owner and object.account.owner in self.owner):
                return
            if self.account and self.account != object.account.id:
                return
            self.statement_event(object)

    def output(self):
        """Return this report's rendered text; subclasses must override."""
        # BUGFIX: was a bare "raise", which fails with "no active
        # exception to re-raise" rather than signaling an abstract method.
        raise NotImplementedError("Report subclasses must implement output()")

    def transaction_event(self, transaction):
        # Default: ignore transactions.
        pass

    def statement_event(self, statement):
        # Default: ignore statements.
        pass
class ValidateReport(Report):
    """
    Validate Transaction History against statements.
    """
    # Running balance per account, replayed from transactions and checked
    # against every statement: {'account' : (balance, date_last_statement)}.
    # NOTE(review): class-level mutable attribute, shared across instances.
    accts = {}

    def object_event(self, object):
        # Overrides Report.object_event with the same dispatch but
        # deliberately without the owner filter, so all accounts are
        # validated.
        if isinstance(object, Transaction):
            if self.account and (object.source.id != self.account and object.dest.id != self.account):
                return
            if self.category and object.category != self.category:
                return
            self.transaction_event(object)
        elif isinstance(object, Statement):
            if self.account and self.account != object.account.id:
                return
            self.statement_event(object)

    def transaction_event(self, transaction):
        # Debit the source account's running balance...
        if transaction.source.id in self.accts.keys():
            self.accts[transaction.source.id] = (self.accts[transaction.source.id][0] - transaction.amount, self.accts[transaction.source.id][1])
        else:
            print >> sys.stderr, 'Account %s has no initial statement (%s)' % (transaction.source.name, transaction.date.strftime('%Y-%m-%d'))
            self.accts[transaction.source.id] = (- transaction.amount, transaction.date) # Will invariably be wrong, but we warned them!
        # ...and credit the destination account's.
        if transaction.dest.id in self.accts.keys():
            self.accts[transaction.dest.id] = (self.accts[transaction.dest.id][0] + transaction.amount, self.accts[transaction.dest.id][1])
        else:
            print >> sys.stderr, 'Account %s has no initial statement (%s)' % (transaction.dest.name, transaction.date.strftime('%Y-%m-%d'))
            self.accts[transaction.dest.id] = (transaction.amount, transaction.date) # Will invariably be wrong, but we warned them!

    def statement_event(self, statement):
        # Compare the replayed balance against the stated one, warn on a
        # mismatch, then reset the window to this statement.
        if statement.account.id in self.accts.keys():
            if self.accts[statement.account.id][0] == statement.balance:
                pass #Ok!
            else:
                #Transactions don't balance!
                print >> sys.stderr, "Incomplete transactions for account %s in period %s -> %s (Statement : %s%s, Transactions: %s%s, Diff: %s%s) " % (
                    statement.account.name,
                    self.accts[statement.account.id][1].strftime('%Y-%m-%d'),
                    statement.date.strftime('%Y-%m-%d'),
                    statement.currency,
                    statement.balance,
                    statement.currency,
                    self.accts[statement.account.id][0],
                    statement.currency,
                    statement.balance - self.accts[statement.account.id][0]
                )
            self.accts[statement.account.id] = (statement.balance, statement.date)
        else:
            self.accts[statement.account.id] = (statement.balance, statement.date)

    def output(self):
        # All findings are printed to stderr as they are found.
        return ""
class MonthlySummaryReport(Report):
    """Report monthly totals of transaction amounts, grouped by category."""

    def __init__(self, *args, **kwargs):
        # Pull our own options out of kwargs before forwarding the rest.
        # pop(..., None) matches the old "get and pop or None" dance:
        # self.start_date ends up falsy whenever it was unset/falsy.
        self.start_date = kwargs.pop('start_date', None)
        self.end_date = kwargs.pop('end_date', None)
        # BUGFIX: was super(...).__init__(args, kwargs), which bound the
        # positional-args *tuple* to "account" and the kwargs *dict* to
        # "owner".  Forward them properly instead.
        super(MonthlySummaryReport, self).__init__(*args, **kwargs)
        # Per-instance state (previously class-level mutable attributes,
        # which would be shared between instances).
        self.curr_month = 0
        self.out = []                # rendered lines, one month at a time
        self.categories = {}         # category -> {currency: (total, month_total)}
        self.accounts = []

    def object_event(self, object):
        if self.start_date and self.start_date >= object.date:
            return
        if object.date.month != self.curr_month:
            # Month rolled over: flush the finished month, start a new one.
            self.month_out()
            self.out.append(object.date.strftime("%B %Y"))
            self.curr_month = object.date.month
        super(MonthlySummaryReport, self).object_event(object)

    def transaction_event(self, transaction):
        category = transaction.category or 'No Category'
        # Skip transfers into one of "my" (owned) accounts.
        # BUGFIX: was "if transaction.dest.owner in self.owner['owner']",
        # which only worked because of the broken super() call above and
        # raised TypeError when no -o owner was given.
        if self.owner and transaction.dest.owner in self.owner:
            return
        if self.categories.get(category):
            if self.categories[category].get(transaction.currency):
                x = self.categories[category][transaction.currency]
                # (running total, current-month total)
                self.categories[category][transaction.currency] = (x[0] + transaction.amount, x[1] + transaction.amount)
            else:
                self.categories[category][transaction.currency] = (transaction.amount, transaction.amount)
        else:
            self.categories[category] = {transaction.currency : (transaction.amount, transaction.amount)}

    def month_out(self):
        """
        Append months summary to out and reset for next month
        """
        categories_out = []
        for k, v in self.categories.iteritems():
            # Skip categories with no activity this month.
            if not any([x[1] for x in v.values()]):
                continue
            values = ", ".join(['%s%.2f' % (currency, x[1]) for currency, x in v.iteritems() if x[1]])
            categories_out.append("\t%s: %s" % (k, values))
            for c, a in v.iteritems():
                # Zero the per-month component, keep the running total.
                self.categories[k][c] = (v[c][0], 0)
        self.out.append('\n'.join(categories_out))

    def statement_event(self, statement):
        # Statements do not contribute to monthly spending.
        pass

    def output(self):
        # Flush the final (partial) month before rendering.
        self.month_out()
        return '\n'.join(self.out)
class BalanceReport(Report):
    """Current balance per account: start from each account's most recent
    statement and replay every transaction seen since it."""
    # NOTE(review): class-level mutable attributes, shared across instances.
    recent_statement = {}
    recent_transactions = {}

    def statement_event(self, statement):
        # A fresh statement resets the replay window for that account.
        self.recent_statement[statement.account.id] = statement
        self.recent_transactions[statement.account.id] = []

    def transaction_event(self, transaction):
        # Only track transactions for accounts that have had a statement.
        if transaction.source.id in self.recent_transactions:
            self.recent_transactions[transaction.source.id].append(transaction)
        if transaction.dest.id in self.recent_transactions:
            self.recent_transactions[transaction.dest.id].append(transaction)

    def output(self):
        acct_bals = []
        totals = {}  # currency -> summed balance
        for acct, statement in self.recent_statement.iteritems():
            balance = statement.balance
            dt = statement.date
            for i in self.recent_transactions[acct]:
                if i.source.id == acct:
                    balance -= i.amount
                else:
                    balance += i.amount
                dt = i.date
            account = accounts[acct]
            # NOTE(review): "default_currency" is not defined anywhere in
            # this module; it is only reached if "account" is falsy, which
            # cannot happen for a registered Account instance.
            currency = account and account.currency or default_currency
            name = account and account.name or acct
            if balance == 0:
                continue
            acct_bals.append("%s% .2f \t :%s (%s)" % (currency, balance, name, dt.date()))
            if currency in totals:
                totals[currency] += balance
            else:
                totals[currency] = balance
        acct_bals = "\n\t".join(acct_bals)
        totals = ", ".join(['%s%s' % (k,v) for k,v in totals.iteritems()])
        out = "\nCurrent Balance: \n\t%s \n\t----\nTotal: %s" % (acct_bals, totals)
        return out
class NetWorthGraph(Report):
    """Emit (date, cumulative net worth) CSV lines for the "$" currency,
    counting only transactions that cross the owner boundary."""
    # NOTE(review): class-level mutable attributes, shared across instances.
    out = {}   # currency -> {date: running net worth}
    prev = {}  # currency -> last running value

    def transaction_event(self, transaction):
        l = self.out.get(transaction.currency, {})
        k = transaction.date
        if l.get(k):
            w = l[k]
        else:
            w = self.prev.get(transaction.currency, 0)
        # Money leaving "my" accounts decreases net worth; money entering
        # them increases it.  Internal transfers cancel out.
        if transaction.source.owner in self.owner and transaction.dest.owner not in self.owner:
            w -= transaction.amount
        elif transaction.dest.owner in self.owner and transaction.source.owner not in self.owner:
            w += transaction.amount
        l[k] = w
        self.prev[transaction.currency] = w
        self.out[transaction.currency] = l

    def output(self):
        # NOTE(review): hard-coded to "$"; raises KeyError if no dollar
        # transactions were seen.
        return "\n".join(sorted(["%s, %s" % x for x in self.out["$"].items()]))
class IncomeOutgoingReport(Report):
    """ASCII table of total income, outgoings and net balance per owned account."""
    # NOTE(review): class-level mutable attributes, shared across instances.
    incomings = {}   # account id -> total received
    outgoings = {}   # account id -> total paid out

    def transaction_event(self, transaction):
        curr_i = self.incomings.get(transaction.dest.id, 0)
        self.incomings[transaction.dest.id] = curr_i + transaction.amount
        curr_o = self.outgoings.get(transaction.source.id, 0)
        self.outgoings[transaction.source.id] = curr_o + transaction.amount

    def output(self):
        # Header row, then one row per account owned by one of self.owner.
        out = ["-"*82, "| %s | %s | %s | %s |" %( 'Account'.center(25), 'Income'.center(15), 'Outgoing'.center(15), 'Balance'.center(15)), "-"*82]
        for acct in list(set(self.incomings.keys()).union(set(self.outgoings.keys()))):
            if accounts[acct].owner not in self.owner:
                continue
            out.append('| %s | %s | %s | %s |' % (
                accounts[acct].name[:25].ljust(25),
                accounts[acct].currency + str(self.incomings.get(acct, '-')).ljust(14),
                accounts[acct].currency + str(self.outgoings.get(acct, '-' )).ljust(14),
                accounts[acct].currency + str(self.incomings.get(acct, 0) - self.outgoings.get(acct, 0)).ljust(14)
                ))
        return '\n'.join(out)
def run_reports():
    """Replay every recorded event (already date-ordered) through every
    active report, then print each report's output."""
    for obj in ordered_objects:
        # ordered_objects holds (sort_key, object) tuples.
        for report in reports:
            report.object_event(obj[1])
    for report in reports:
        print report.output()
def run():
    """Parse command-line options, exec the given ledger file(s) and run
    the selected reports over the recorded events."""
    parser = optparse.OptionParser("usage: %prog [options] accounts", version='%prog 0.1')
    parser.add_option('-a', '--account', action = 'append', dest = 'accounts', help = 'Limit output to specified account(s)')
    parser.add_option('-o', '--owner', action = 'append', dest = 'owner', help = 'Limit output to specified owner(s)')
    parser.add_option('-p', '--projection', action = 'store_true', dest = 'projection', help = '')
    parser.add_option('-s', '--start_date', action = 'append', dest = 'start', help = 'Start reports after start date (YYYY-MM-DD)')
    rpts = []
    #==Reports
    # Each report flag appends its Report subclass; all are instantiated
    # after parsing with the full option set.
    def balance(*args):
        rpts.append(BalanceReport)
    parser.add_option('-b', '--balance', action = 'callback', callback = balance, help = 'Print Balance information')
    def in_out(*args):
        rpts.append(IncomeOutgoingReport)
    parser.add_option('-i', '--inout', action = 'callback', callback = in_out, help = ' ')
    def monthly_summary(*args):
        rpts.append(MonthlySummaryReport)
    parser.add_option('-m', '--monthly_summary', action = 'callback', callback = monthly_summary, help = 'Summary of Month to Month Expenses')
    def net_worth_graph(*args):
        rpts.append(NetWorthGraph)
    parser.add_option('-n', '--net', action = 'callback', callback = net_worth_graph, help = '')
    def validate(*args):
        rpts.append(ValidateReport)
    parser.add_option('-v', '--validate', action = 'callback', callback = validate, help = 'Validate data is complete')
    (options, args) = parser.parse_args()
    for i in rpts:
        reports.append(
            i(
                start_date = options.start and datetime.datetime.strptime(options.start[0], '%Y-%m-%d'),
                **options.__dict__
                )
            )
    for arg in args:
        # NOTE(review): execfile runs arbitrary Python from the ledger
        # file -- only feed it trusted input.  (Python 2 only.)
        execfile(arg)
    run_reports()
if __name__ == '__main__':
run() | mit |
pamfilos/invenio | modules/bibcheck/lib/bibcheck.py | 21 | 1848 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibCheck API
This API lets other modules interact with bibcheck.
"""
from invenio import bibcheck_task
def check_record(record, enabled_rules=None):
    """
    Check a record against some bibcheck rules.

    @param record: Record to check
    @type record: recstruct
    @param enabled_rules: List of rules to run. Default None (run all rules)
    @type enabled_rules: list
    @returns: AmendableRecord with the list of errors/amendments
    """
    plugins = bibcheck_task.load_plugins()
    rules = bibcheck_task.load_rules(plugins)
    record = bibcheck_task.AmendableRecord(record)
    # Restrict the known rule names to the requested subset, if any.
    rule_names = set(rules.keys())
    if enabled_rules is not None:
        rule_names.intersection_update(enabled_rules)
    for rule_name in rule_names:
        rule = rules[rule_name]
        record.set_rule(rule)
        plugin = plugins[rule["check"]]
        # Batch plugins take a list of records; the rest take one record.
        if plugin["batch"]:
            plugin["check_records"]([record], **rule["checker_params"])
        else:
            plugin["check_record"](record, **rule["checker_params"])
    return record
| gpl-2.0 |
depjs/dep | node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 8 | 2261 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace('"', """).replace(">", ">")
if is_attrib:
data = data.replace("\r", "
").replace("\n", "
").replace("\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(' %s="' % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write('"')
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
    """Object to manage temporary patching of xml.dom.minidom.

    Construction swaps minidom's writer internals for the fixed
    replacements above; Cleanup() (or garbage collection) restores the
    originals.
    """

    def __init__(self):
        # Remember the stock implementations so they can be restored.
        self.write_data = xml.dom.minidom._write_data
        self.writexml = xml.dom.minidom.Element.writexml
        # Install the patched versions.
        xml.dom.minidom._write_data = _Replacement_write_data
        xml.dom.minidom.Element.writexml = _Replacement_writexml

    def Cleanup(self):
        """Restore minidom's original functions (idempotent)."""
        if not self.write_data:
            return
        xml.dom.minidom._write_data = self.write_data
        xml.dom.minidom.Element.writexml = self.writexml
        self.write_data = None

    def __del__(self):
        self.Cleanup()
| mit |
daveywilkie/TweetMoar | tweepy/utils.py | 30 | 2663 | # Tweepy
# Copyright 2010 Joshua Roesslein
# See LICENSE for details.
from datetime import datetime
import time
import htmlentitydefs
import re
import locale
from urllib import quote
def parse_datetime(string):
    """Parse a Twitter timestamp like 'Wed Aug 27 13:08:45 +0000 2008'."""
    # Force the C locale so English month/day names parse regardless of
    # the user's locale; restore the environment default afterwards.
    locale.setlocale(locale.LC_TIME, 'C')
    # strptime + positional construction keeps Python 2.4 compatibility.
    fields = time.strptime(string, '%a %b %d %H:%M:%S +0000 %Y')
    parsed = datetime(*fields[0:6])
    locale.setlocale(locale.LC_TIME, '')
    return parsed
def parse_html_value(html):
    """Return the text between the first '>' and the last '<' in *html*."""
    start = html.find('>') + 1
    end = html.rfind('<')
    return html[start:end]
def parse_a_href(atag):
    """Extract the first double-quoted value (the href) from an anchor tag."""
    opening = atag.find('"') + 1
    closing = atag.find('"', opening)
    return atag[opening:closing]
def parse_search_datetime(string):
    """Parse a Search API timestamp like 'Wed, 27 Aug 2008 13:08:45 +0000'."""
    # Force the C locale so English month/day names parse regardless of
    # the user's locale; restore the environment default afterwards.
    locale.setlocale(locale.LC_TIME, 'C')
    # strptime + positional construction keeps Python 2.4 compatibility.
    fields = time.strptime(string, '%a, %d %b %Y %H:%M:%S +0000')
    parsed = datetime(*fields[0:6])
    locale.setlocale(locale.LC_TIME, '')
    return parsed
def unescape_html(text):
    """Created by Fredrik Lundh (http://effbot.org/zone/re-sub.htm#unescape-html)"""
    # Replace numeric (&#nnn; / &#xhh;) and named (&amp;) HTML entities
    # with the corresponding characters.  Python 2 only (uses unichr).
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text # leave as is
    return re.sub("&#?\w+;", fixup, text)
def convert_to_utf8_str(arg):
    # written by Michael Norton (http://docondev.blogspot.com/)
    # Coerce any value to a UTF-8 encoded byte string (Python 2
    # semantics: "unicode" and "str" are distinct types here).
    if isinstance(arg, unicode):
        arg = arg.encode('utf-8')
    elif not isinstance(arg, str):
        arg = str(arg)
    return arg
def import_simplejson():
    """Return the first available JSON module: simplejson, the stdlib
    json module, or Django's bundled simplejson (Google App Engine)."""
    try:
        import simplejson as json
    except ImportError:
        try:
            import json # Python 2.6+
        except ImportError:
            try:
                from django.utils import simplejson as json # Google App Engine
            except ImportError:
                # Python 2 raise syntax; this module predates Python 3.
                raise ImportError, "Can't load a json library"
    return json
def list_to_csv(item_list):
    """Join items into a comma-separated string.

    Returns None for an empty or falsy list, matching the historical
    (implicit) behavior.
    """
    if not item_list:
        return None
    return ','.join(str(item) for item in item_list)
def urlencode_noplus(query):
    # Like urllib.urlencode, but percent-encodes spaces (%20) instead of
    # using '+', as required by OAuth signature base strings.
    # Python 2 only (dict.iteritems).
    return '&'.join(['%s=%s' % (quote(str(k)), quote(str(v))) \
        for k, v in query.iteritems()])
| mit |
rohanp/scikit-learn | sklearn/tree/tests/test_export.py | 31 | 9588 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]  # 2-D features, separable on X[0]
y = [-1, -1, -1, 1, 1, 1]                                   # binary labels
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]    # multi-output labels
w = [1, 1, 1, .5, .5, .5]                                   # sample weights
def test_graphviz_toy():
    """Golden-output checks for export_graphviz on the toy sample.

    BUGFIX: the HTML entity "&le;" in the special_characters expectation
    had been corrupted into a literal "≤" by an entity-unescaping pass;
    export_graphviz emits the entity, so it is restored below.
    """
    # Check correctness of export_graphviz
    clf = DecisionTreeClassifier(max_depth=3,
                                 min_samples_split=2,
                                 criterion="gini",
                                 random_state=2)
    clf.fit(X, y)

    # Test export code
    out = StringIO()
    export_graphviz(clf, out_file=out)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test with feature_names
    out = StringIO()
    export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test with class_names
    out = StringIO()
    export_graphviz(clf, out_file=out, class_names=["yes", "no"])
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = yes"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
                'class = yes"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
                'class = no"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test plot_options
    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, impurity=False,
                    proportion=True, special_characters=True, rounded=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                '0 [label=<X<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>' \
                'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
                '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
                'fillcolor="#e58139ff"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
                'fillcolor="#399de5ff"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test max_depth
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = y[0]"] ;\n' \
                '1 [label="(...)"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)"] ;\n' \
                '0 -> 2 ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test max_depth with plot_options
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0, filled=True,
                    node_ids=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
                'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
                '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 2 ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test multi-output with weighted samples
    clf = DecisionTreeClassifier(max_depth=2,
                                 min_samples_split=2,
                                 criterion="gini",
                                 random_state=2)
    clf = clf.fit(X, y2, sample_weight=w)

    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, impurity=False)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
                'value = [[3.0, 1.5, 0.0]\\n' \
                '[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
                '1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
                '[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
                'value = [[0.0, 1.5, 0.0]\\n' \
                '[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
                '[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
                '2 -> 3 ;\n' \
                '4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
                '[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
                '2 -> 4 ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test regression output with plot_options
    clf = DecisionTreeRegressor(max_depth=3,
                                min_samples_split=2,
                                criterion="mse",
                                random_state=2)
    clf.fit(X, y)

    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
                    rotate=True, rounded=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'graph [ranksep=equally, splines=polyline] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                'rankdir=LR ;\n' \
                '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
                'value = 0.0", fillcolor="#e5813980"] ;\n' \
                '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
                'fillcolor="#e5813900"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
                'fillcolor="#e58139ff"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="False"] ;\n' \
                '{rank=same ; 0} ;\n' \
                '{rank=same ; 1; 2} ;\n' \
                '}'

    assert_equal(contents1, contents2)
def test_graphviz_errors():
    # Check for errors of export_graphviz
    clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
    clf.fit(X, y)

    # Empty feature_names / class_names lists must raise IndexError.
    for bad_kwarg in ("feature_names", "class_names"):
        out = StringIO()
        assert_raises(IndexError, export_graphviz, clf, out,
                      **{bad_kwarg: []})
def test_friedman_mse_in_graphviz():
    """The friedman_mse criterion must appear in exported node labels."""
    clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
    clf.fit(X, y)
    dot_data = StringIO()
    export_graphviz(clf, out_file=dot_data)

    clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
    clf.fit(X, y)
    for estimator in clf.estimators_:
        export_graphviz(estimator[0], out_file=dot_data)

    # Raw string: "\[" is an invalid escape sequence in a plain literal
    # (DeprecationWarning, and a SyntaxWarning in newer Pythons).
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
        assert_in("friedman_mse", finding.group())
| bsd-3-clause |
photoninger/ansible | lib/ansible/module_utils/facts/system/local.py | 144 | 3138 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import json
import os
import stat
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six.moves import StringIO
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
class LocalFactCollector(BaseFactCollector):
    """Collect "local" facts from *.fact files under the configured fact_path.

    Executable .fact files are run and their stdout is parsed;
    non-executable ones are read directly.  Content is parsed as JSON
    first, then as INI; unparseable content is replaced by an error
    string so collection never raises.
    """
    name = 'local'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        local_facts = {}
        local_facts['local'] = {}

        if not module:
            return local_facts

        fact_path = module.params.get('fact_path', None)
        if not fact_path or not os.path.exists(fact_path):
            return local_facts

        local = {}
        # Sorted for deterministic fact ordering.
        for fn in sorted(glob.glob(fact_path + '/*.fact')):
            # where it will sit under local facts
            fact_base = os.path.basename(fn).replace('.fact', '')
            # Owner-executable files are run; everything else is read.
            if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
                # run it
                # try to read it as json first
                # if that fails read it with ConfigParser
                # if that fails, skip it
                try:
                    rc, out, err = module.run_command(fn)
                except UnicodeError:
                    # NOTE(review): this aborts the whole collection on
                    # the first non-UTF-8 script output rather than
                    # skipping just that file.
                    fact = 'error loading fact - output of running %s was not utf-8' % fn
                    local[fact_base] = fact
                    local_facts['local'] = local
                    return local_facts
            else:
                out = get_file_content(fn, default='')

            # load raw json
            fact = 'loading %s' % fact_base
            try:
                fact = json.loads(out)
            except ValueError:
                # load raw ini
                cp = configparser.ConfigParser()
                try:
                    # readfp is deprecated; kept for Python 2 compatibility.
                    cp.readfp(StringIO(out))
                except configparser.Error:
                    fact = "error loading fact - please check content"
                else:
                    # Convert the INI sections into a nested dict.
                    fact = {}
                    for sect in cp.sections():
                        if sect not in fact:
                            fact[sect] = {}
                        for opt in cp.options(sect):
                            val = cp.get(sect, opt)
                            fact[sect][opt] = val

            local[fact_base] = fact
            local_facts['local'] = local
        return local_facts
| gpl-3.0 |
robertavram/Linux-Server-Configuration | FlaskApp/secret_keys.py | 1 | 1363 | CSRF_SECRET_KEY, SESSION_KEY = "0h97kel3aq17853645odikh97kel3aq4vndtonignnobfjh", "3aq4vnd4vndtonignnt801785onignnob"
# Google APIs
GOOGLE_APP_ID = '768017853645-odikh97kel3aq4vndtonignnobfjhkea.apps.googleusercontent.com'
GOOGLE_APP_SECRET = 'gb2X0NdP36xF-2kmj_S2IN3U'
#GOOGLE_REDIRECT_URI = 'http://localhost:5000/auth/google/callback'
#GOOGLE_REDIRECT_URI = 'http://www.flutterhub.com/auth/google/callback'
GOOGLE_REDIRECT_URI = 'http://52.27.185.214/auth/google/callback'
# Facebook auth apis
FB_APP_ID = '382093888646657'
FB_APP_SECRET = '2ba3373b14a801141d26c32bf9c9b205'
#FB_REDIRECT_URI = "http://localhost:5000/auth/facebook/callback"
#FB_REDIRECT_URI = "http://www.flutterhub.com/auth/facebook/callback"
FB_REDIRECT_URI = "http://52.27.185.214/auth/facebook/callback"
# Key/secret for both LinkedIn OAuth 1.0a and OAuth 2.0
# https://www.linkedin.com/secure/developer
LINKEDIN_KEY = 'consumer key'
LINKEDIN_SECRET = 'consumer secret'
# https://manage.dev.live.com/AddApplication.aspx
# https://manage.dev.live.com/Applications/Index
WL_CLIENT_ID = 'client id'
WL_CLIENT_SECRET = 'client secret'
# https://dev.twitter.com/apps
TWITTER_CONSUMER_KEY = 'oauth1.0a consumer key'
TWITTER_CONSUMER_SECRET = 'oauth1.0a consumer secret'
# https://foursquare.com/developers/apps
FOURSQUARE_CLIENT_ID = 'client id'
FOURSQUARE_CLIENT_SECRET = 'client secret'
| apache-2.0 |
aarondewindt/paparazzi_torrap | sw/tools/calibration/calib_mag_live.py | 13 | 6700 | #! /usr/bin/env python
from __future__ import print_function, division
import time
import logging
import sys
from os import path, getenv
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_SRC + "/sw/lib/python")
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
PPRZ_HOME = getenv("PAPARAZZI_HOME", PPRZ_SRC)
from pprzlink.ivy import IvyMessagesInterface
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
import scipy
from scipy import optimize
import calibration_utils
class MagPlot(object):
    """Live, auto-scaling 3D scatter plot of raw magnetometer samples."""

    def __init__(self):
        # Setup the figure and axes...
        self.fig = plt.figure()
        #self.ax = self.fig.add_subplot(1, 1, 1, projection='3d')
        self.ax = p3.Axes3D(self.fig)
        self.ax.set_aspect('equal')
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_zlabel('Z')
        self.set_ax_lim(10)
        self.ax.set_title("Raw Mag data")
        # one (x, y, z) row per received sample; starts with a single
        # all-zero placeholder row that add_data() overwrites (see below)
        self.data = np.zeros((1, 3))
        self.max_lim = 1
        # Then setup FuncAnimation.
        self.ani = animation.FuncAnimation(self.fig, self.update, interval=100, init_func=self.setup_plot, blit=False)

    def set_ax_lim(self, lim):
        # apply a symmetric [-lim, lim] range to all three axes
        lim = [-lim, lim]
        self.ax.set_xlim3d(lim)
        self.ax.set_ylim3d(lim)
        self.ax.set_zlim3d(lim)

    def setup_plot(self):
        """Create the initial scatter artist (FuncAnimation init_func)."""
        x = self.data[:, 0]
        y = self.data[:, 1]
        z = self.data[:, 2]
        self.scat = self.ax.scatter(x, y, z, alpha=1)
        # For FuncAnimation's sake, we need to return the artist we'll be using
        # Note that it expects a sequence of artists, thus the trailing comma.
        return self.scat,

    def show(self, block=True):
        plt.show(block=block)

    def update(self, i):
        """Push the current contents of self.data into the scatter artist.

        Called periodically by FuncAnimation; i is the frame counter.
        """
        logging.debug("updating scatter: %d with %s" % (i, len(self.data)))
        self.scat.set_offsets(self.data[:, 0:2])
        self.scat.set_3d_properties(self.data[:, 2], 'z')
        # We need to return the updated artist for FuncAnimation to draw..
        # Note that it expects a sequence of artists, thus the trailing comma.
        return self.scat,

    def add_data(self, data):
        """Append one (x, y, z) sample and widen the axis limits if needed."""
        logging.debug("adding data %s" % data)
        if len(self.data) == 1 and not np.any(self.data):
            # first real sample replaces the all-zero placeholder row
            self.data[0] = np.array(data)
        else:
            self.data = np.vstack((self.data, np.array(data)))
        max_lim = np.max(np.abs(data))
        if max_lim > self.max_lim:
            self.max_lim = max_lim
            self.set_ax_lim(max_lim)
class MagCalibrator(object):
    """Collects raw magnetometer samples over Ivy and fits a calibration.

    Subscribes to IMU_MAG_RAW messages, accumulates the raw readings,
    derives a min/max initial guess and refines it with a least-squares
    optimization (via calibration_utils), optionally plotting live data
    and the calibration result.
    """

    def __init__(self, plot_results=True, verbose=False):
        self._interface = IvyMessagesInterface("calib_mag")
        self.plotter = MagPlot()
        self.data = []          # list of np.array([x, y, z]) raw samples
        self.flt_meas = []      # filtered measurements used for fitting
        # parameter vector: [neutral_x, neutral_y, neutral_z, scale_x, scale_y, scale_z]
        self.p0 = np.array([0, 0, 0, 0, 0, 0])
        self.optimization_done = False
        self.plot_results = plot_results

    def start_collect(self):
        """Start receiving IMU_MAG_RAW messages."""
        self._interface.subscribe(self.message_recv, "(.*IMU_MAG_RAW.*)")

    def stop_collect(self):
        """Stop receiving messages."""
        self._interface.unsubscribe_all()

    def message_recv(self, ac_id, msg):
        """Ivy callback: store one raw measurement (and plot it)."""
        # parse the field values once and reuse the list; passing a bare
        # map() object here broke under Python 3, where np.array() turns
        # the iterator into a 0-d object array that vstack cannot stack
        values = [int(v) for v in msg.fieldvalues]
        self.data.append(np.array(values))
        if self.plot_results:
            self.plotter.add_data(values)

    def shutdown(self):
        """Tear down the Ivy interface (idempotent)."""
        if self._interface is not None:
            print("Shutting down ivy interface...")
            self._interface.shutdown()
            self._interface = None

    def __del__(self):
        self.shutdown()

    def calc_min_max_guess(self):
        """Refresh self.flt_meas / self.p0 from the collected samples."""
        if len(self.data) > 3:
            # filter out noisy measurements?
            self.flt_meas = np.array(self.data)
            self.p0 = calibration_utils.get_min_max_guess(self.flt_meas, 1.0)

    def print_min_max_guess(self):
        """Print the current neutral/scale estimate for the operator."""
        self.calc_min_max_guess()
        if self.data:
            print("Current guess from %d measurements: neutral [%d, %d, %d], scale [%.3f, %.3f, %.3f]" % (len(self.flt_meas),
                  int(round(self.p0[0])), int(round(self.p0[1])), int(round(self.p0[2])),
                  self.p0[3]*2**11, self.p0[4]*2**11, self.p0[5]*2**11))

    def calibrate(self):
        """Run the least-squares fit and report/plot the result."""
        self.calc_min_max_guess()

        if len(self.flt_meas) < 10:
            logging.warning("Not enough measurements")
            return

        cp0, np0 = calibration_utils.scale_measurements(self.flt_meas, self.p0)
        logging.info("initial guess : avg "+str(np0.mean())+" std "+str(np0.std()))
        calibration_utils.print_xml(self.p0, "MAG", 11)

        def err_func(p, meas, y):
            # residual between the target norm y and the scaled norms;
            # the scaled result must NOT be named 'np' (it previously
            # shadowed the numpy module inside this function)
            cp_, scaled = calibration_utils.scale_measurements(meas, p)
            err = y * np.ones(len(meas)) - scaled
            return err

        p1, cov, info, msg, success = optimize.leastsq(err_func, self.p0[:], args=(self.flt_meas, 1.0), full_output=1)
        self.optimization_done = success in [1, 2, 3, 4]
        if not self.optimization_done:
            # lazy %-style argument: the old call passed msg as a second
            # positional, which logging silently dropped
            logging.warning("Optimization error: %s", msg)

        cp1, np1 = calibration_utils.scale_measurements(self.flt_meas, p1)
        if self.optimization_done:
            logging.info("optimized guess : avg " + str(np1.mean()) + " std " + str(np1.std()))
            calibration_utils.print_xml(p1, "MAG", 11)
        else:
            logging.info("last iteration of failed optimized guess : avg " + str(np1.mean()) + " std " + str(np1.std()))

        if self.plot_results:
            calibration_utils.plot_results("MAG", np.array(self.data), range(len(self.data)),
                                           self.flt_meas, cp0, np0, cp1, np1, 1.0, blocking=False)
            calibration_utils.plot_mag_3d(self.flt_meas, cp1, p1)
if __name__ == '__main__':
    import argparse

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--plot', action='store_true', help='Interactive plot')
    args = parser.parse_args()

    if args.plot:
        print("Close the interactive plot window to run the final calibration.")
    else:
        print("Press CTRL-C to stop data collection and run the final calibration.")
    try:
        mc = MagCalibrator(plot_results=args.plot)
        mc.start_collect()
        if args.plot:
            # interactive mode: blocks until the plot window is closed
            mc.plotter.show()
        else:
            # headless mode: periodically report the running guess until CTRL-C
            while True:
                time.sleep(2)
                mc.print_min_max_guess()
    except KeyboardInterrupt:
        print("Stopping on request")
    # on either exit path: stop the Ivy subscription, run the final
    # optimization, and shut the interface down cleanly
    mc.stop_collect()
    mc.calibrate()
    mc.shutdown()
| gpl-2.0 |
tomato42/fsresck | tests/test_image.py | 1 | 12496 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Description: File system resilience testing application
# Author: Hubert Kario <hubert@kario.pl>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Hubert Kario. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import mock
from mock import call
except ImportError:
import unittest.mock as mock
from unittest.mock import call
import sys
if sys.version_info[0] == 2:
import __builtin__ as builtins
else:
import builtins
import os
import tempfile
import subprocess
from fsresck.image import Image
from fsresck.write import Write
from fsresck.errors import FSCopyError
class TestImage(unittest.TestCase):
    """Unit tests for fsresck.image.Image.

    All filesystem interaction (temp-file creation, the external `cp`
    invocation, patching the writes into the copy, unlinking) is mocked
    out, so the tests never touch the real filesystem. The previously
    triplicated mock setup and verification code now lives in the
    private helpers below; the public test method names are unchanged.
    """

    def _install_mocks(self, copy_returncode, temp_name='/tmp/fsresck.xxxx',
                       cleanup_open=True):
        """Patch the I/O primitives used by Image.create_image/cleanup.

        copy_returncode -- value the mocked subprocess.call returns
                           (0 = successful 'cp', non-zero = failure).
        temp_name       -- path the mocked tempfile.mkstemp hands out.
        cleanup_open    -- register the builtins.open patcher with
                           addCleanup; the "twice" test stops it by hand.
        Returns a dict of started mocks plus the open patcher itself.
        """
        open_patcher = mock.patch.object(builtins, 'open', mock.mock_open())
        mocks = {'open': open_patcher.start(), 'open_patcher': open_patcher}
        if cleanup_open:
            self.addCleanup(open_patcher.stop)

        mkstemp = mock.create_autospec(tempfile.mkstemp)
        mkstemp.return_value = (-33, temp_name)
        patcher = mock.patch.object(tempfile, 'mkstemp', mkstemp)
        mocks['mkstemp'] = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch.object(os, 'close', mock.MagicMock())
        mocks['close'] = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch.object(
            subprocess, 'call',
            mock.create_autospec(subprocess.call,
                                 return_value=copy_returncode))
        mocks['call'] = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch.object(os, 'unlink', mock.MagicMock())
        mocks['unlink'] = patcher.start()
        self.addCleanup(patcher.stop)
        return mocks

    def _assert_image_written(self, mocks, temp_name):
        """Verify the temp copy was opened r+b and the write applied."""
        # call_count is checked before fetching the handle: calling the
        # mock to obtain the handle increments the counter
        self.assertEqual(mocks['open'].call_count, 1)
        self.assertEqual(mocks['open'].call_args, mock.call(temp_name,
                                                            'r+b'))
        handle = mocks['open']()
        self.assertEqual(handle.seek.call_args, mock.call(4))
        self.assertEqual(handle.write.call_args, mock.call('aa'))

    def _assert_temp_created(self, mocks, temp_name):
        """Verify mkstemp/close/cp were each invoked exactly once."""
        self.assertEqual(mocks['mkstemp'].call_count, 1)
        self.assertEqual(mocks['mkstemp'].call_args,
                         mock.call(prefix='fsresck.', dir='/tmp'))
        self.assertEqual(mocks['close'].call_count, 1)
        self.assertEqual(mocks['close'].call_args, mock.call(-33))
        self.assertEqual(mocks['call'].call_count, 1)
        self.assertEqual(mocks['call'].call_args,
                         mock.call(['cp', '--reflink=auto',
                                    '--sparse=auto', '/tmp/test.1',
                                    temp_name]))

    def test___init__(self):
        self.assertIsNotNone(Image(None, None))

    def test___repr__(self):
        image = Image("/tmp/test.1", [Write(offset=4, data='aa')])
        self.assertEqual("Image(image_name='/tmp/test.1', "
                         "writes=[<Write offset=4, "
                         "len(data)=2>])", repr(image))

    # (name kept as-is, typo "adter" included, so test selection by name
    # keeps working)
    def test___repr___adter_create_image(self):
        image = Image("/tmp/test.1", [Write(offset=4, data='aa')])
        image.temp_image_name = '/tmp/test.2'
        self.assertEqual("Image(image_name='/tmp/test.2', writes=[])",
                         repr(image))

    def test_create_image_and_cleanup(self):
        image = Image("/tmp/test.1", [Write(offset=4, data='aa')])
        mocks = self._install_mocks(copy_returncode=0)

        image_name = image.create_image('/tmp')
        self.assertEqual('/tmp/fsresck.xxxx', image_name)
        # a second call must hand back the already-created temp image
        self.assertEqual(image_name, image.create_image("/tmp"))
        image.cleanup()
        self.assertEqual(None, image.temp_image_name)

        self._assert_image_written(mocks, '/tmp/fsresck.xxxx')
        self._assert_temp_created(mocks, '/tmp/fsresck.xxxx')
        self.assertEqual(mocks['unlink'].call_count, 1)
        self.assertEqual(mocks['unlink'].call_args,
                         mock.call('/tmp/fsresck.xxxx'))

    def test_create_image_with_failed_copy(self):
        image = Image("/tmp/test.1", [Write(offset=4, data='aa')])
        mocks = self._install_mocks(copy_returncode=1)

        with self.assertRaises(FSCopyError):
            image.create_image('/tmp')

        # copy failed: the image must never be opened nor unlinked
        self.assertEqual(mocks['open'].call_count, 0)
        self._assert_temp_created(mocks, '/tmp/fsresck.xxxx')
        self.assertEqual(mocks['unlink'].call_count, 0)

    def test_create_image_twice(self):
        image = Image("/tmp/test.1", [Write(offset=4, data='aa')])
        # the open patcher is swapped mid-test, so it is stopped manually
        mocks = self._install_mocks(copy_returncode=0, cleanup_open=False)

        image_name = image.create_image('/tmp')
        self.assertEqual('/tmp/fsresck.xxxx', image_name)
        image.cleanup()
        self.assertEqual(None, image.temp_image_name)

        self._assert_image_written(mocks, '/tmp/fsresck.xxxx')
        self._assert_temp_created(mocks, '/tmp/fsresck.xxxx')
        self.assertEqual(mocks['unlink'].call_count, 1)
        self.assertEqual(mocks['unlink'].call_args,
                         mock.call('/tmp/fsresck.xxxx'))

        # fresh open mock and counters for the second round
        mocks['open_patcher'].stop()
        patcher = mock.patch.object(builtins, 'open', mock.mock_open())
        mocks['open'] = patcher.start()
        self.addCleanup(patcher.stop)
        for name in ('mkstemp', 'close', 'call', 'unlink'):
            mocks[name].reset_mock()
        # second run hands out a different temporary file name
        mocks['mkstemp'].return_value = (-33, '/tmp/fsresck.yyyy')

        image_name = image.create_image('/tmp')
        self.assertEqual('/tmp/fsresck.yyyy', image_name)
        image.cleanup()

        # the second run must create a file with the same contents
        self._assert_image_written(mocks, '/tmp/fsresck.yyyy')
        self._assert_temp_created(mocks, '/tmp/fsresck.yyyy')
        self.assertEqual(mocks['unlink'].call_count, 1)
        self.assertEqual(mocks['unlink'].call_args,
                         mock.call('/tmp/fsresck.yyyy'))
| gpl-2.0 |
MichaelNedzelsky/intellij-community | python/testData/MockSdk2.7/python_stubs/__builtin__.py | 40 | 174842 | # encoding: utf-8
# module __builtin__
# from (built-in)
# by generator 1.136
from __future__ import print_function
"""
Built-in functions, exceptions, and other objects.
Noteworthy: None is the `nil' object; Ellipsis represents `...' in slices.
"""
# imports
from exceptions import (ArithmeticError, AssertionError, AttributeError,
BaseException, BufferError, BytesWarning, DeprecationWarning, EOFError,
EnvironmentError, Exception, FloatingPointError, FutureWarning,
GeneratorExit, IOError, ImportError, ImportWarning, IndentationError,
IndexError, KeyError, KeyboardInterrupt, LookupError, MemoryError,
NameError, NotImplementedError, OSError, OverflowError,
PendingDeprecationWarning, ReferenceError, RuntimeError, RuntimeWarning,
StandardError, StopIteration, SyntaxError, SyntaxWarning, SystemError,
SystemExit, TabError, TypeError, UnboundLocalError, UnicodeDecodeError,
UnicodeEncodeError, UnicodeError, UnicodeTranslateError, UnicodeWarning,
UserWarning, ValueError, Warning, ZeroDivisionError)
# Variables with simple values
# NOTE: stub re-bindings of the builtin singletons for the IDE skeleton;
# assigning to None/True/False is only legal syntax under Python 2,
# which matches the Python 2.7 __builtin__ module this file mirrors.
False = False
None = object() # real value of type <type 'NoneType'> replaced
True = True
__debug__ = True
# functions
def abs(number): # real signature unknown; restored from __doc__
"""
abs(number) -> number
Return the absolute value of the argument.
"""
return 0
def all(iterable): # real signature unknown; restored from __doc__
"""
all(iterable) -> bool
Return True if bool(x) is True for all values x in the iterable.
If the iterable is empty, return True.
"""
return False
def any(iterable): # real signature unknown; restored from __doc__
"""
any(iterable) -> bool
Return True if bool(x) is True for any x in the iterable.
If the iterable is empty, return False.
"""
return False
def apply(p_object, args=None, kwargs=None): # real signature unknown; restored from __doc__
"""
apply(object[, args[, kwargs]]) -> value
Call a callable object with positional arguments taken from the tuple args,
and keyword arguments taken from the optional dictionary kwargs.
Note that classes are callable, as are instances with a __call__() method.
Deprecated since release 2.3. Instead, use the extended call syntax:
function(*args, **keywords).
"""
pass
def bin(number): # real signature unknown; restored from __doc__
"""
bin(number) -> string
Return the binary representation of an integer or long integer.
"""
return ""
def callable(p_object): # real signature unknown; restored from __doc__
"""
callable(object) -> bool
Return whether the object is callable (i.e., some kind of function).
Note that classes are callable, as are instances with a __call__() method.
"""
return False
def chr(i): # real signature unknown; restored from __doc__
"""
chr(i) -> character
Return a string of one character with ordinal i; 0 <= i < 256.
"""
return ""
def cmp(x, y): # real signature unknown; restored from __doc__
"""
cmp(x, y) -> integer
Return negative if x<y, zero if x==y, positive if x>y.
"""
return 0
def coerce(x, y): # real signature unknown; restored from __doc__
"""
coerce(x, y) -> (x1, y1)
Return a tuple consisting of the two numeric arguments converted to
a common type, using the same rules as used by arithmetic operations.
If coercion is not possible, raise TypeError.
"""
pass
def compile(source, filename, mode, flags=None, dont_inherit=None): # real signature unknown; restored from __doc__
"""
compile(source, filename, mode[, flags[, dont_inherit]]) -> code object
Compile the source string (a Python module, statement or expression)
into a code object that can be executed by the exec statement or eval().
The filename will be used for run-time error messages.
The mode must be 'exec' to compile a module, 'single' to compile a
single (interactive) statement, or 'eval' to compile an expression.
The flags argument, if present, controls which future statements influence
the compilation of the code.
The dont_inherit argument, if non-zero, stops the compilation inheriting
the effects of any future statements in effect in the code calling
compile; if absent or zero these statements do influence the compilation,
in addition to any features explicitly specified.
"""
pass
def copyright(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def credits(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def delattr(p_object, name): # real signature unknown; restored from __doc__
"""
delattr(object, name)
Delete a named attribute on an object; delattr(x, 'y') is equivalent to
``del x.y''.
"""
pass
def dir(p_object=None): # real signature unknown; restored from __doc__
"""
dir([object]) -> list of strings
If called without an argument, return the names in the current scope.
Else, return an alphabetized list of names comprising (some of) the attributes
of the given object, and of attributes reachable from it.
If the object supplies a method named __dir__, it will be used; otherwise
the default dir() logic is used and returns:
for a module object: the module's attributes.
for a class object: its attributes, and recursively the attributes
of its bases.
for any other object: its attributes, its class's attributes, and
recursively the attributes of its class's base classes.
"""
return []
def divmod(x, y): # known case of __builtin__.divmod
"""
divmod(x, y) -> (quotient, remainder)
Return the tuple ((x-x%y)/y, x%y). Invariant: div*y + mod == x.
"""
return (0, 0)
def eval(source, globals=None, locals=None): # real signature unknown; restored from __doc__
"""
eval(source[, globals[, locals]]) -> value
Evaluate the source in the context of globals and locals.
The source may be a string representing a Python expression
or a code object as returned by compile().
The globals must be a dictionary and locals can be any mapping,
defaulting to the current globals and locals.
If only globals is given, locals defaults to it.
"""
pass
def execfile(filename, globals=None, locals=None): # real signature unknown; restored from __doc__
"""
execfile(filename[, globals[, locals]])
Read and execute a Python script from a file.
The globals and locals are dictionaries, defaulting to the current
globals and locals. If only globals is given, locals defaults to it.
"""
pass
def exit(*args, **kwargs): # real signature unknown
pass
def filter(function_or_none, sequence): # known special case of filter
"""
filter(function or None, sequence) -> list, tuple, or string
Return those items of sequence for which function(item) is true. If
function is None, return the items that are true. If sequence is a tuple
or string, return the same type, else return a list.
"""
pass
def format(value, format_spec=None): # real signature unknown; restored from __doc__
"""
format(value[, format_spec]) -> string
Returns value.__format__(format_spec)
format_spec defaults to ""
"""
return ""
def getattr(object, name, default=None): # known special case of getattr
"""
getattr(object, name[, default]) -> value
Get a named attribute from an object; getattr(x, 'y') is equivalent to x.y.
When a default argument is given, it is returned when the attribute doesn't
exist; without it, an exception is raised in that case.
"""
pass
def globals(): # real signature unknown; restored from __doc__
"""
globals() -> dictionary
Return the dictionary containing the current scope's global variables.
"""
return {}
def hasattr(p_object, name): # real signature unknown; restored from __doc__
"""
hasattr(object, name) -> bool
Return whether the object has an attribute with the given name.
(This is done by calling getattr(object, name) and catching exceptions.)
"""
return False
def hash(p_object): # real signature unknown; restored from __doc__
"""
hash(object) -> integer
Return a hash value for the object. Two objects with the same value have
the same hash value. The reverse is not necessarily true, but likely.
"""
return 0
def help(with_a_twist): # real signature unknown; restored from __doc__
"""
Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
pass
def hex(number): # real signature unknown; restored from __doc__
"""
hex(number) -> string
Return the hexadecimal representation of an integer or long integer.
"""
return ""
def id(p_object): # real signature unknown; restored from __doc__
"""
id(object) -> integer
Return the identity of an object. This is guaranteed to be unique among
simultaneously existing objects. (Hint: it's the object's memory address.)
"""
return 0
def input(prompt=None): # real signature unknown; restored from __doc__
"""
input([prompt]) -> value
Equivalent to eval(raw_input(prompt)).
"""
pass
def intern(string): # real signature unknown; restored from __doc__
"""
intern(string) -> string
``Intern'' the given string. This enters the string in the (global)
table of interned strings whose purpose is to speed up dictionary lookups.
Return the string itself or the previously interned string object with the
same value.
"""
return ""
def isinstance(p_object, class_or_type_or_tuple): # real signature unknown; restored from __doc__
"""
isinstance(object, class-or-type-or-tuple) -> bool
Return whether an object is an instance of a class or of a subclass thereof.
With a type as second argument, return whether that is the object's type.
The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for
isinstance(x, A) or isinstance(x, B) or ... (etc.).
"""
return False
def issubclass(C, B): # real signature unknown; restored from __doc__
"""
issubclass(C, B) -> bool
Return whether class C is a subclass (i.e., a derived class) of class B.
When using a tuple as the second argument issubclass(X, (A, B, ...)),
is a shortcut for issubclass(X, A) or issubclass(X, B) or ... (etc.).
"""
return False
def iter(source, sentinel=None): # known special case of iter
"""
iter(collection) -> iterator
iter(callable, sentinel) -> iterator
Get an iterator from an object. In the first form, the argument must
supply its own iterator, or be a sequence.
In the second form, the callable is called until it returns the sentinel.
"""
pass
def len(p_object): # real signature unknown; restored from __doc__
"""
len(object) -> integer
Return the number of items of a sequence or mapping.
"""
return 0
def license(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def locals(): # real signature unknown; restored from __doc__
"""
locals() -> dictionary
Update and return a dictionary containing the current scope's local variables.
"""
return {}
def map(function, sequence, *sequence_1): # real signature unknown; restored from __doc__
"""
map(function, sequence[, sequence, ...]) -> list
Return a list of the results of applying the function to the items of
the argument sequence(s). If more than one sequence is given, the
function is called with an argument list consisting of the corresponding
item of each sequence, substituting None for missing values when not all
sequences have the same length. If the function is None, return a list of
the items of the sequence (or a list of tuples if more than one sequence).
"""
return []
def max(*args, **kwargs): # known special case of max
"""
max(iterable[, key=func]) -> value
max(a, b, c, ...[, key=func]) -> value
With a single iterable argument, return its largest item.
With two or more arguments, return the largest argument.
"""
pass
def min(*args, **kwargs): # known special case of min
"""
min(iterable[, key=func]) -> value
min(a, b, c, ...[, key=func]) -> value
With a single iterable argument, return its smallest item.
With two or more arguments, return the smallest argument.
"""
pass
def next(iterator, default=None): # real signature unknown; restored from __doc__
"""
next(iterator[, default])
Return the next item from the iterator. If default is given and the iterator
is exhausted, it is returned instead of raising StopIteration.
"""
pass
def oct(number): # real signature unknown; restored from __doc__
"""
oct(number) -> string
Return the octal representation of an integer or long integer.
"""
return ""
def open(name, mode=None, buffering=None): # real signature unknown; restored from __doc__
"""
open(name[, mode[, buffering]]) -> file object
Open a file using the file() type, returns a file object. This is the
preferred way to open a file. See file.__doc__ for further information.
"""
return file('/dev/null')
def ord(c): # real signature unknown; restored from __doc__
"""
ord(c) -> integer
Return the integer ordinal of a one-character string.
"""
return 0
def pow(x, y, z=None): # real signature unknown; restored from __doc__
"""
pow(x, y[, z]) -> number
With two arguments, equivalent to x**y. With three arguments,
equivalent to (x**y) % z, but may be more efficient (e.g. for longs).
"""
return 0
def print(*args, **kwargs): # known special case of print
"""
print(value, ..., sep=' ', end='\n', file=sys.stdout)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
"""
pass
def quit(*args, **kwargs): # real signature unknown
pass
def range(start=None, stop=None, step=None): # known special case of range
"""
range(stop) -> list of integers
range(start, stop[, step]) -> list of integers
Return a list containing an arithmetic progression of integers.
range(i, j) returns [i, i+1, i+2, ..., j-1]; start (!) defaults to 0.
When step is given, it specifies the increment (or decrement).
For example, range(4) returns [0, 1, 2, 3]. The end point is omitted!
These are exactly the valid indices for a list of 4 elements.
"""
pass
def raw_input(prompt=None): # real signature unknown; restored from __doc__
"""
raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading.
"""
return ""
def reduce(function, sequence, initial=None): # real signature unknown; restored from __doc__
"""
reduce(function, sequence[, initial]) -> value
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
((((1+2)+3)+4)+5). If initial is present, it is placed before the items
of the sequence in the calculation, and serves as a default when the
sequence is empty.
"""
pass
def reload(module): # real signature unknown; restored from __doc__
"""
reload(module) -> module
Reload the module. The module must have been successfully imported before.
"""
pass
def repr(p_object): # real signature unknown; restored from __doc__
"""
repr(object) -> string
Return the canonical string representation of the object.
For most object types, eval(repr(object)) == object.
"""
return ""
def round(number, ndigits=None): # real signature unknown; restored from __doc__
"""
round(number[, ndigits]) -> floating point number
Round a number to a given precision in decimal digits (default 0 digits).
This always returns a floating point number. Precision may be negative.
"""
return 0.0
def setattr(p_object, name, value): # real signature unknown; restored from __doc__
"""
setattr(object, name, value)
Set a named attribute on an object; setattr(x, 'y', v) is equivalent to
``x.y = v''.
"""
pass
def sorted(iterable, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__
""" sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list """
pass
def sum(sequence, start=None): # real signature unknown; restored from __doc__
"""
sum(sequence[, start]) -> value
Return the sum of a sequence of numbers (NOT strings) plus the value
of parameter 'start' (which defaults to 0). When the sequence is
empty, return start.
"""
pass
def unichr(i): # real signature unknown; restored from __doc__
"""
unichr(i) -> Unicode character
Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.
"""
return u""
def vars(p_object=None): # real signature unknown; restored from __doc__
"""
vars([object]) -> dictionary
Without arguments, equivalent to locals().
With an argument, equivalent to object.__dict__.
"""
return {}
def zip(seq1, seq2, *more_seqs):  # known special case of zip
    """
    zip(seq1 [, seq2 [...]]) -> [(seq1[0], seq2[0] ...), (...)]

    Pair up the i-th elements of every argument sequence into tuples,
    truncating at the length of the shortest sequence.  Skeleton stub:
    builds nothing.
    """
    return None
def __import__(name, globals={}, locals={}, fromlist=[], level=-1):  # real signature unknown; restored from __doc__
    """
    __import__(name, globals={}, locals={}, fromlist=[], level=-1) -> module

    Low-level import hook used by the interpreter itself; prefer
    importlib.import_module() for programmatic imports.  globals only
    supplies context (it is never modified), locals is ignored, and
    fromlist emulates ``from name import ...'' (an empty list emulates
    ``import name'').  For a dotted name such as __import__('A.B', ...),
    an empty fromlist returns package A while a non-empty one returns
    submodule B.  level selects absolute vs. relative resolution: -1
    tries both (the historical strategy), 0 forces absolute, and a
    positive value is the number of parent directories to search
    relative to the current module.
    """
    # NOTE: the mutable defaults deliberately mirror the real builtin's
    # published signature; this stub never mutates them.
    return None
# classes
# Generated IDE skeleton: stand-in for the old-style (classic) class base.
# Attribute layout is kept exact for introspection tooling.
class ___Classobj:
    '''A mock class representing the old style class base.'''
    __module__ = ''
    __class__ = None
    def __init__(self):
        pass
    __dict__ = {}
    __doc__ = ''
# Generated IDE skeleton: mirrors the attributes and methods of a real
# generator object so tooling can resolve them; all bodies are no-ops.
class __generator(object):
    '''A mock class representing the generator function type.'''
    def __init__(self):
        # Introspection attributes exposed by a live generator.
        self.gi_code = None
        self.gi_frame = None
        self.gi_running = 0
    def __iter__(self):
        '''Defined to support iteration over container.'''
        pass
    def next(self):
        '''Return the next item from the container.'''
        pass
    def close(self):
        '''Raises new GeneratorExit exception inside the generator to terminate the iteration.'''
        pass
    def send(self, value):
        '''Resumes the generator and "sends" a value that becomes the result of the current yield-expression.'''
        pass
    def throw(self, type, value=None, traceback=None):
        '''Used to raise an exception inside the generator.'''
        pass
# Generated IDE skeleton: mirrors both the legacy func_* attributes and
# their dunder aliases carried by a Python 2 function object.
class __function(object):
    '''A mock class representing function type.'''

    def __init__(self):
        # Standard function metadata.
        self.__name__ = ''
        self.__doc__ = ''
        self.__dict__ = ''
        self.__module__ = ''
        # Legacy Python 2 attribute names ...
        self.func_defaults = {}
        self.func_globals = {}
        self.func_closure = None
        self.func_code = None
        self.func_name = ''
        self.func_doc = ''
        self.func_dict = ''
        # ... and their dunder equivalents.  (The original skeleton
        # re-assigned self.__name__ here a second time; the redundant
        # duplicate assignment has been dropped.)
        self.__defaults__ = {}
        self.__globals__ = {}
        self.__closure__ = None
        self.__code__ = None
# Generated IDE skeleton: bound/unbound method object with both the
# legacy im_* attributes and their dunder aliases.
class __method(object):
    '''A mock class representing method type.'''
    def __init__(self):
        # Legacy Python 2 names.
        self.im_class = None
        self.im_self = None
        self.im_func = None
        # Dunder aliases for the same data.
        self.__func__ = None
        self.__self__ = None
# Generated IDE skeleton: base shape of a collections.namedtuple subclass.
class __namedtuple(tuple):
    '''A mock base class for named tuples.'''
    __slots__ = ()
    _fields = ()
    def __new__(cls, *args, **kwargs):
        'Create a new instance of the named tuple.'
        return tuple.__new__(cls, *args)
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new named tuple object from a sequence or iterable.'
        return new(cls, iterable)
    def __repr__(self):
        return ''
    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        return {}
    def _replace(self, **kwargs):
        'Return a new named tuple object replacing specified fields with new values.'
        return self
    def __getnewargs__(self):
        return tuple(self)
# Generated IDE skeleton: shadows the real builtin `object`, reproducing
# its CPython docstrings verbatim; all bodies are no-ops.
class object:
    """ The most base type """
    def __delattr__(self, name):  # real signature unknown; restored from __doc__
        """ x.__delattr__('name') <==> del x.name """
        pass
    def __format__(self, *args, **kwargs):  # real signature unknown
        """ default object formatter """
        pass
    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __hash__(self):  # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self):  # known special case of object.__init__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass
    @staticmethod  # known case of __new__
    def __new__(cls, *more):  # known special case of object.__new__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __reduce_ex__(self, *args, **kwargs):  # real signature unknown
        """ helper for pickle """
        pass
    def __reduce__(self, *args, **kwargs):  # real signature unknown
        """ helper for pickle """
        pass
    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __setattr__(self, name, value):  # real signature unknown; restored from __doc__
        """ x.__setattr__('name', value) <==> x.name = value """
        pass
    def __sizeof__(self):  # real signature unknown; restored from __doc__
        """
        __sizeof__() -> int
        size of object in memory, in bytes
        """
        return 0
    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    @classmethod  # known case
    def __subclasshook__(cls, subclass):  # known special case of object.__subclasshook__
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented.  If it returns
        NotImplemented, the normal algorithm is used.  Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    __class__ = None  # (!) forward: type, real value is ''
    __dict__ = {}
    __doc__ = ''
    __module__ = ''
# Generated IDE skeleton: Python 2 abstract string base (str / unicode).
class basestring(object):
    """ Type basestring cannot be instantiated; it is the base for str and unicode. """
    def __init__(self, *args, **kwargs):  # real signature unknown
        pass
    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
# Generated IDE skeleton: shadows the builtin `int`, reproducing CPython's
# Python 2 docstrings verbatim; all numeric protocol bodies are no-ops.
class int(object):
    """
    int(x=0) -> int or long
    int(x, base=10) -> int or long
    Convert a number or string to an integer, or return 0 if no arguments
    are given.  If x is floating point, the conversion truncates towards zero.
    If x is outside the integer range, the function returns a long instead.
    If x is not a number or if base is given, then x must be a string or
    Unicode object representing an integer literal in the given base.  The
    literal can be preceded by '+' or '-' and be surrounded by whitespace.
    The base defaults to 10.  Valid bases are 0 and 2-36.  Base 0 means to
    interpret the base from the string as an integer literal.
    >>> int('0b100', base=0)
    4
    """
    def bit_length(self):  # real signature unknown; restored from __doc__
        """
        int.bit_length() -> int
        Number of bits necessary to represent self in binary.
        >>> bin(37)
        '0b100101'
        >>> (37).bit_length()
        6
        """
        return 0
    def conjugate(self, *args, **kwargs):  # real signature unknown
        """ Returns self, the complex conjugate of any int. """
        pass
    def __abs__(self):  # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass
    def __add__(self, y):  # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __and__(self, y):  # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __cmp__(self, y):  # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __coerce__(self, y):  # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass
    def __divmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass
    def __div__(self, y):  # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass
    def __float__(self):  # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass
    def __floordiv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass
    def __format__(self, *args, **kwargs):  # real signature unknown
        pass
    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getnewargs__(self, *args, **kwargs):  # real signature unknown
        pass
    def __hash__(self):  # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __hex__(self):  # real signature unknown; restored from __doc__
        """ x.__hex__() <==> hex(x) """
        pass
    def __index__(self):  # real signature unknown; restored from __doc__
        """ x[y:z] <==> x[y.__index__():z.__index__()] """
        pass
    def __init__(self, x, base=10):  # known special case of int.__init__
        """
        int(x=0) -> int or long
        int(x, base=10) -> int or long
        Convert a number or string to an integer, or return 0 if no arguments
        are given.  If x is floating point, the conversion truncates towards zero.
        If x is outside the integer range, the function returns a long instead.
        If x is not a number or if base is given, then x must be a string or
        Unicode object representing an integer literal in the given base.  The
        literal can be preceded by '+' or '-' and be surrounded by whitespace.
        The base defaults to 10.  Valid bases are 0 and 2-36.  Base 0 means to
        interpret the base from the string as an integer literal.
        >>> int('0b100', base=0)
        4
        # (copied from class doc)
        """
        pass
    def __int__(self):  # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass
    def __invert__(self):  # real signature unknown; restored from __doc__
        """ x.__invert__() <==> ~x """
        pass
    def __long__(self):  # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass
    def __lshift__(self, y):  # real signature unknown; restored from __doc__
        """ x.__lshift__(y) <==> x<<y """
        pass
    def __mod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass
    def __mul__(self, y):  # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass
    def __neg__(self):  # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass
    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __nonzero__(self):  # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __oct__(self):  # real signature unknown; restored from __doc__
        """ x.__oct__() <==> oct(x) """
        pass
    def __or__(self, y):  # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __pos__(self):  # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass
    def __pow__(self, y, z=None):  # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass
    def __radd__(self, y):  # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass
    def __rand__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
    def __rdivmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rdivmod__(y) <==> divmod(y, x) """
        pass
    def __rdiv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass
    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rfloordiv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rfloordiv__(y) <==> y//x """
        pass
    def __rlshift__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rlshift__(y) <==> y<<x """
        pass
    def __rmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass
    def __rmul__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass
    def __ror__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass
    def __rpow__(self, x, z=None):  # real signature unknown; restored from __doc__
        """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
        pass
    def __rrshift__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rrshift__(y) <==> y>>x """
        pass
    def __rshift__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rshift__(y) <==> x>>y """
        pass
    def __rsub__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass
    def __rtruediv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass
    def __rxor__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass
    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    def __sub__(self, y):  # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass
    def __truediv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass
    def __trunc__(self, *args, **kwargs):  # real signature unknown
        """ Truncating an Integral returns itself. """
        pass
    def __xor__(self, y):  # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass
    denominator = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the denominator of a rational number in lowest terms"""
    imag = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the imaginary part of a complex number"""
    numerator = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the numerator of a rational number in lowest terms"""
    real = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the real part of a complex number"""
# Generated IDE skeleton: shadows the builtin `bool` (subclass of int);
# only the operators bool actually overrides are listed.
class bool(int):
    """
    bool(x) -> bool
    Returns True when the argument x is true, False otherwise.
    The builtins True and False are the only two instances of the class bool.
    The class bool is a subclass of the class int, and cannot be subclassed.
    """
    def __and__(self, y):  # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __init__(self, x):  # real signature unknown; restored from __doc__
        pass
    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __or__(self, y):  # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __rand__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __ror__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass
    def __rxor__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass
    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    def __xor__(self, y):  # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass
# Generated IDE skeleton: the Python 2 `buffer` type (removed in Python 3;
# superseded by memoryview).  All bodies are no-ops.
class buffer(object):
    """
    buffer(object [, offset[, size]])
    Create a new buffer object which references the given object.
    The buffer will reference a slice of the target object from the
    start of the object (or at the specified offset).  The slice will
    extend to the end of the target object (or with the specified size).
    """
    def __add__(self, y):  # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __cmp__(self, y):  # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __delitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __delslice__(self, i, j):  # real signature unknown; restored from __doc__
        """
        x.__delslice__(i, j) <==> del x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getslice__(self, i, j):  # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __hash__(self):  # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self, p_object, offset=None, size=None):  # real signature unknown; restored from __doc__
        pass
    def __len__(self):  # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __mul__(self, n):  # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass
    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rmul__(self, n):  # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass
    def __setitem__(self, i, y):  # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    def __setslice__(self, i, j, y):  # real signature unknown; restored from __doc__
        """
        x.__setslice__(i, j, y) <==> x[i:j]=y
        Use of negative indices is not supported.
        """
        pass
    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
# Generated IDE skeleton: shadows the builtin `bytearray`, reproducing its
# CPython docstrings; all bodies are no-ops or return dummy values.
class bytearray(object):
    """
    bytearray(iterable_of_ints) -> bytearray.
    bytearray(string, encoding[, errors]) -> bytearray.
    bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray.
    bytearray(memory_view) -> bytearray.
    Construct a mutable bytearray object from:
      - an iterable yielding integers in range(256)
      - a text string encoded using the specified encoding
      - a bytes or a bytearray object
      - any object implementing the buffer API.
    bytearray(int) -> bytearray.
    Construct a zero-initialized bytearray of the given length.
    """
    def append(self, p_int):  # real signature unknown; restored from __doc__
        """
        B.append(int) -> None
        Append a single item to the end of B.
        """
        pass
    def capitalize(self):  # real signature unknown; restored from __doc__
        """
        B.capitalize() -> copy of B
        Return a copy of B with only its first character capitalized (ASCII)
        and the rest lower-cased.
        """
        pass
    def center(self, width, fillchar=None):  # real signature unknown; restored from __doc__
        """
        B.center(width[, fillchar]) -> copy of B
        Return B centered in a string of length width.  Padding is
        done using the specified fill character (default is a space).
        """
        pass
    def count(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.count(sub [,start [,end]]) -> int
        Return the number of non-overlapping occurrences of subsection sub in
        bytes B[start:end].  Optional arguments start and end are interpreted
        as in slice notation.
        """
        return 0
    def decode(self, encoding=None, errors=None):  # real signature unknown; restored from __doc__
        """
        B.decode([encoding[, errors]]) -> unicode object.
        Decodes B using the codec registered for encoding.  encoding defaults
        to the default encoding.  errors may be given to set a different error
        handling scheme.  Default is 'strict' meaning that encoding errors raise
        a UnicodeDecodeError.  Other possible values are 'ignore' and 'replace'
        as well as any other name registered with codecs.register_error that is
        able to handle UnicodeDecodeErrors.
        """
        return u""
    def endswith(self, suffix, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.endswith(suffix [,start [,end]]) -> bool
        Return True if B ends with the specified suffix, False otherwise.
        With optional start, test B beginning at that position.
        With optional end, stop comparing B at that position.
        suffix can also be a tuple of strings to try.
        """
        return False
    def expandtabs(self, tabsize=None):  # real signature unknown; restored from __doc__
        """
        B.expandtabs([tabsize]) -> copy of B
        Return a copy of B where all tab characters are expanded using spaces.
        If tabsize is not given, a tab size of 8 characters is assumed.
        """
        pass
    def extend(self, iterable_int):  # real signature unknown; restored from __doc__
        """
        B.extend(iterable int) -> None
        Append all the elements from the iterator or sequence to the
        end of B.
        """
        pass
    def find(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.find(sub [,start [,end]]) -> int
        Return the lowest index in B where subsection sub is found,
        such that sub is contained within B[start,end].  Optional
        arguments start and end are interpreted as in slice notation.
        Return -1 on failure.
        """
        return 0
    @classmethod  # known case
    def fromhex(cls, string):  # real signature unknown; restored from __doc__
        """
        bytearray.fromhex(string) -> bytearray
        Create a bytearray object from a string of hexadecimal numbers.
        Spaces between two numbers are accepted.
        Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef').
        """
        return bytearray
    def index(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.index(sub [,start [,end]]) -> int
        Like B.find() but raise ValueError when the subsection is not found.
        """
        return 0
    def insert(self, index, p_int):  # real signature unknown; restored from __doc__
        """
        B.insert(index, int) -> None
        Insert a single item into the bytearray before the given index.
        """
        pass
    def isalnum(self):  # real signature unknown; restored from __doc__
        """
        B.isalnum() -> bool
        Return True if all characters in B are alphanumeric
        and there is at least one character in B, False otherwise.
        """
        return False
    def isalpha(self):  # real signature unknown; restored from __doc__
        """
        B.isalpha() -> bool
        Return True if all characters in B are alphabetic
        and there is at least one character in B, False otherwise.
        """
        return False
    def isdigit(self):  # real signature unknown; restored from __doc__
        """
        B.isdigit() -> bool
        Return True if all characters in B are digits
        and there is at least one character in B, False otherwise.
        """
        return False
    def islower(self):  # real signature unknown; restored from __doc__
        """
        B.islower() -> bool
        Return True if all cased characters in B are lowercase and there is
        at least one cased character in B, False otherwise.
        """
        return False
    def isspace(self):  # real signature unknown; restored from __doc__
        """
        B.isspace() -> bool
        Return True if all characters in B are whitespace
        and there is at least one character in B, False otherwise.
        """
        return False
    def istitle(self):  # real signature unknown; restored from __doc__
        """
        B.istitle() -> bool
        Return True if B is a titlecased string and there is at least one
        character in B, i.e. uppercase characters may only follow uncased
        characters and lowercase characters only cased ones.  Return False
        otherwise.
        """
        return False
    def isupper(self):  # real signature unknown; restored from __doc__
        """
        B.isupper() -> bool
        Return True if all cased characters in B are uppercase and there is
        at least one cased character in B, False otherwise.
        """
        return False
    def join(self, iterable_of_bytes):  # real signature unknown; restored from __doc__
        """
        B.join(iterable_of_bytes) -> bytes
        Concatenates any number of bytearray objects, with B in between each pair.
        """
        return ""
    def ljust(self, width, fillchar=None):  # real signature unknown; restored from __doc__
        """
        B.ljust(width[, fillchar]) -> copy of B
        Return B left justified in a string of length width.  Padding is
        done using the specified fill character (default is a space).
        """
        pass
    def lower(self):  # real signature unknown; restored from __doc__
        """
        B.lower() -> copy of B
        Return a copy of B with all ASCII characters converted to lowercase.
        """
        pass
    def lstrip(self, bytes=None):  # real signature unknown; restored from __doc__
        """
        B.lstrip([bytes]) -> bytearray
        Strip leading bytes contained in the argument.
        If the argument is omitted, strip leading ASCII whitespace.
        """
        return bytearray
    def partition(self, sep):  # real signature unknown; restored from __doc__
        """
        B.partition(sep) -> (head, sep, tail)
        Searches for the separator sep in B, and returns the part before it,
        the separator itself, and the part after it.  If the separator is not
        found, returns B and two empty bytearray objects.
        """
        pass
    def pop(self, index=None):  # real signature unknown; restored from __doc__
        """
        B.pop([index]) -> int
        Remove and return a single item from B.  If no index
        argument is given, will pop the last value.
        """
        return 0
    def remove(self, p_int):  # real signature unknown; restored from __doc__
        """
        B.remove(int) -> None
        Remove the first occurrence of a value in B.
        """
        pass
    def replace(self, old, new, count=None):  # real signature unknown; restored from __doc__
        """
        B.replace(old, new[, count]) -> bytes
        Return a copy of B with all occurrences of subsection
        old replaced by new.  If the optional argument count is
        given, only the first count occurrences are replaced.
        """
        return ""
    def reverse(self):  # real signature unknown; restored from __doc__
        """
        B.reverse() -> None
        Reverse the order of the values in B in place.
        """
        pass
    def rfind(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.rfind(sub [,start [,end]]) -> int
        Return the highest index in B where subsection sub is found,
        such that sub is contained within B[start,end].  Optional
        arguments start and end are interpreted as in slice notation.
        Return -1 on failure.
        """
        return 0
    def rindex(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.rindex(sub [,start [,end]]) -> int
        Like B.rfind() but raise ValueError when the subsection is not found.
        """
        return 0
    def rjust(self, width, fillchar=None):  # real signature unknown; restored from __doc__
        """
        B.rjust(width[, fillchar]) -> copy of B
        Return B right justified in a string of length width.  Padding is
        done using the specified fill character (default is a space)
        """
        pass
    def rpartition(self, sep):  # real signature unknown; restored from __doc__
        """
        B.rpartition(sep) -> (head, sep, tail)
        Searches for the separator sep in B, starting at the end of B,
        and returns the part before it, the separator itself, and the
        part after it.  If the separator is not found, returns two empty
        bytearray objects and B.
        """
        pass
    def rsplit(self, sep, maxsplit=None):  # real signature unknown; restored from __doc__
        """
        B.rsplit(sep[, maxsplit]) -> list of bytearray
        Return a list of the sections in B, using sep as the delimiter,
        starting at the end of B and working to the front.
        If sep is not given, B is split on ASCII whitespace characters
        (space, tab, return, newline, formfeed, vertical tab).
        If maxsplit is given, at most maxsplit splits are done.
        """
        return []
    def rstrip(self, bytes=None):  # real signature unknown; restored from __doc__
        """
        B.rstrip([bytes]) -> bytearray
        Strip trailing bytes contained in the argument.
        If the argument is omitted, strip trailing ASCII whitespace.
        """
        return bytearray
    def split(self, sep=None, maxsplit=None):  # real signature unknown; restored from __doc__
        """
        B.split([sep[, maxsplit]]) -> list of bytearray
        Return a list of the sections in B, using sep as the delimiter.
        If sep is not given, B is split on ASCII whitespace characters
        (space, tab, return, newline, formfeed, vertical tab).
        If maxsplit is given, at most maxsplit splits are done.
        """
        return []
    def splitlines(self, keepends=False):  # real signature unknown; restored from __doc__
        """
        B.splitlines(keepends=False) -> list of lines
        Return a list of the lines in B, breaking at line boundaries.
        Line breaks are not included in the resulting list unless keepends
        is given and true.
        """
        return []
    def startswith(self, prefix, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        B.startswith(prefix [,start [,end]]) -> bool
        Return True if B starts with the specified prefix, False otherwise.
        With optional start, test B beginning at that position.
        With optional end, stop comparing B at that position.
        prefix can also be a tuple of strings to try.
        """
        return False
    def strip(self, bytes=None):  # real signature unknown; restored from __doc__
        """
        B.strip([bytes]) -> bytearray
        Strip leading and trailing bytes contained in the argument.
        If the argument is omitted, strip ASCII whitespace.
        """
        return bytearray
    def swapcase(self):  # real signature unknown; restored from __doc__
        """
        B.swapcase() -> copy of B
        Return a copy of B with uppercase ASCII characters converted
        to lowercase ASCII and vice versa.
        """
        pass
    def title(self):  # real signature unknown; restored from __doc__
        """
        B.title() -> copy of B
        Return a titlecased version of B, i.e. ASCII words start with uppercase
        characters, all remaining cased characters have lowercase.
        """
        pass
    def translate(self, table, deletechars=None):  # real signature unknown; restored from __doc__
        """
        B.translate(table[, deletechars]) -> bytearray
        Return a copy of B, where all characters occurring in the
        optional argument deletechars are removed, and the remaining
        characters have been mapped through the given translation
        table, which must be a bytes object of length 256.
        """
        return bytearray
    def upper(self):  # real signature unknown; restored from __doc__
        """
        B.upper() -> copy of B
        Return a copy of B with all ASCII characters converted to uppercase.
        """
        pass
    def zfill(self, width):  # real signature unknown; restored from __doc__
        """
        B.zfill(width) -> copy of B
        Pad a numeric string B with zeros on the left, to fill a field
        of the specified width.  B is never truncated.
        """
        pass
    def __add__(self, y):  # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __alloc__(self):  # real signature unknown; restored from __doc__
        """
        B.__alloc__() -> int
        Returns the number of bytes actually allocated.
        """
        return 0
    def __contains__(self, y):  # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x """
        pass
    def __delitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __eq__(self, y):  # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __ge__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __iadd__(self, y):  # real signature unknown; restored from __doc__
        """ x.__iadd__(y) <==> x+=y """
        pass
    def __imul__(self, y):  # real signature unknown; restored from __doc__
        """ x.__imul__(y) <==> x*=y """
        pass
    def __init__(self, source=None, encoding=None, errors='strict'):  # known special case of bytearray.__init__
        """
        bytearray(iterable_of_ints) -> bytearray.
        bytearray(string, encoding[, errors]) -> bytearray.
        bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray.
        bytearray(memory_view) -> bytearray.
        Construct a mutable bytearray object from:
          - an iterable yielding integers in range(256)
          - a text string encoded using the specified encoding
          - a bytes or a bytearray object
          - any object implementing the buffer API.
        bytearray(int) -> bytearray.
        Construct a zero-initialized bytearray of the given length.
        # (copied from class doc)
        """
        pass
    def __iter__(self):  # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __len__(self):  # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __le__(self, y):  # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mul__(self, n):  # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass
    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __reduce__(self, *args, **kwargs):  # real signature unknown
        """ Return state information for pickling. """
        pass
    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rmul__(self, n):  # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass
    def __setitem__(self, i, y):  # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    def __sizeof__(self):  # real signature unknown; restored from __doc__
        """
        B.__sizeof__() -> int
        Returns the size of B in memory, in bytes
        """
        return 0
    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
# NOTE(review): IDE-generated skeleton of the Python 2 built-in `str` type;
# method bodies are placeholders ("restored from __doc__").  `basestring` is
# defined earlier in this generated module.  Leading indentation was lost in
# the source and is restored here; the `bytes = str` alias mirrors Python 2.
class str(basestring):
    """
    str(object='') -> string

    Return a nice string representation of the object.
    If the argument is a string, the return value is the same object.
    """

    def capitalize(self):  # real signature unknown; restored from __doc__
        """
        S.capitalize() -> string

        Return a copy of the string S with only its first character
        capitalized.
        """
        return ""

    def center(self, width, fillchar=None):  # real signature unknown; restored from __doc__
        """
        S.center(width[, fillchar]) -> string

        Return S centered in a string of length width. Padding is
        done using the specified fill character (default is a space)
        """
        return ""

    def count(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.count(sub[, start[, end]]) -> int

        Return the number of non-overlapping occurrences of substring sub in
        string S[start:end].  Optional arguments start and end are interpreted
        as in slice notation.
        """
        return 0

    def decode(self, encoding=None, errors=None):  # real signature unknown; restored from __doc__
        """
        S.decode([encoding[,errors]]) -> object

        Decodes S using the codec registered for encoding. encoding defaults
        to the default encoding. errors may be given to set a different error
        handling scheme. Default is 'strict' meaning that encoding errors raise
        a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
        as well as any other name registered with codecs.register_error that is
        able to handle UnicodeDecodeErrors.
        """
        return object()

    def encode(self, encoding=None, errors=None):  # real signature unknown; restored from __doc__
        """
        S.encode([encoding[,errors]]) -> object

        Encodes S using the codec registered for encoding. encoding defaults
        to the default encoding. errors may be given to set a different error
        handling scheme. Default is 'strict' meaning that encoding errors raise
        a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
        'xmlcharrefreplace' as well as any other name registered with
        codecs.register_error that is able to handle UnicodeEncodeErrors.
        """
        return object()

    def endswith(self, suffix, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.endswith(suffix[, start[, end]]) -> bool

        Return True if S ends with the specified suffix, False otherwise.
        With optional start, test S beginning at that position.
        With optional end, stop comparing S at that position.
        suffix can also be a tuple of strings to try.
        """
        return False

    def expandtabs(self, tabsize=None):  # real signature unknown; restored from __doc__
        """
        S.expandtabs([tabsize]) -> string

        Return a copy of S where all tab characters are expanded using spaces.
        If tabsize is not given, a tab size of 8 characters is assumed.
        """
        return ""

    def find(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.find(sub [,start [,end]]) -> int

        Return the lowest index in S where substring sub is found,
        such that sub is contained within S[start:end].  Optional
        arguments start and end are interpreted as in slice notation.

        Return -1 on failure.
        """
        return 0

    def format(*args, **kwargs):  # known special case of str.format
        """
        S.format(*args, **kwargs) -> string

        Return a formatted version of S, using substitutions from args and kwargs.
        The substitutions are identified by braces ('{' and '}').
        """
        pass

    def index(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.index(sub [,start [,end]]) -> int

        Like S.find() but raise ValueError when the substring is not found.
        """
        return 0

    def isalnum(self):  # real signature unknown; restored from __doc__
        """
        S.isalnum() -> bool

        Return True if all characters in S are alphanumeric
        and there is at least one character in S, False otherwise.
        """
        return False

    def isalpha(self):  # real signature unknown; restored from __doc__
        """
        S.isalpha() -> bool

        Return True if all characters in S are alphabetic
        and there is at least one character in S, False otherwise.
        """
        return False

    def isdigit(self):  # real signature unknown; restored from __doc__
        """
        S.isdigit() -> bool

        Return True if all characters in S are digits
        and there is at least one character in S, False otherwise.
        """
        return False

    def islower(self):  # real signature unknown; restored from __doc__
        """
        S.islower() -> bool

        Return True if all cased characters in S are lowercase and there is
        at least one cased character in S, False otherwise.
        """
        return False

    def isspace(self):  # real signature unknown; restored from __doc__
        """
        S.isspace() -> bool

        Return True if all characters in S are whitespace
        and there is at least one character in S, False otherwise.
        """
        return False

    def istitle(self):  # real signature unknown; restored from __doc__
        """
        S.istitle() -> bool

        Return True if S is a titlecased string and there is at least one
        character in S, i.e. uppercase characters may only follow uncased
        characters and lowercase characters only cased ones. Return False
        otherwise.
        """
        return False

    def isupper(self):  # real signature unknown; restored from __doc__
        """
        S.isupper() -> bool

        Return True if all cased characters in S are uppercase and there is
        at least one cased character in S, False otherwise.
        """
        return False

    def join(self, iterable):  # real signature unknown; restored from __doc__
        """
        S.join(iterable) -> string

        Return a string which is the concatenation of the strings in the
        iterable.  The separator between elements is S.
        """
        return ""

    def ljust(self, width, fillchar=None):  # real signature unknown; restored from __doc__
        """
        S.ljust(width[, fillchar]) -> string

        Return S left-justified in a string of length width. Padding is
        done using the specified fill character (default is a space).
        """
        return ""

    def lower(self):  # real signature unknown; restored from __doc__
        """
        S.lower() -> string

        Return a copy of the string S converted to lowercase.
        """
        return ""

    def lstrip(self, chars=None):  # real signature unknown; restored from __doc__
        """
        S.lstrip([chars]) -> string or unicode

        Return a copy of the string S with leading whitespace removed.
        If chars is given and not None, remove characters in chars instead.
        If chars is unicode, S will be converted to unicode before stripping
        """
        return ""

    def partition(self, sep):  # real signature unknown; restored from __doc__
        """
        S.partition(sep) -> (head, sep, tail)

        Search for the separator sep in S, and return the part before it,
        the separator itself, and the part after it.  If the separator is not
        found, return S and two empty strings.
        """
        pass

    def replace(self, old, new, count=None):  # real signature unknown; restored from __doc__
        """
        S.replace(old, new[, count]) -> string

        Return a copy of string S with all occurrences of substring
        old replaced by new.  If the optional argument count is
        given, only the first count occurrences are replaced.
        """
        return ""

    def rfind(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.rfind(sub [,start [,end]]) -> int

        Return the highest index in S where substring sub is found,
        such that sub is contained within S[start:end].  Optional
        arguments start and end are interpreted as in slice notation.

        Return -1 on failure.
        """
        return 0

    def rindex(self, sub, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.rindex(sub [,start [,end]]) -> int

        Like S.rfind() but raise ValueError when the substring is not found.
        """
        return 0

    def rjust(self, width, fillchar=None):  # real signature unknown; restored from __doc__
        """
        S.rjust(width[, fillchar]) -> string

        Return S right-justified in a string of length width. Padding is
        done using the specified fill character (default is a space)
        """
        return ""

    def rpartition(self, sep):  # real signature unknown; restored from __doc__
        """
        S.rpartition(sep) -> (head, sep, tail)

        Search for the separator sep in S, starting at the end of S, and return
        the part before it, the separator itself, and the part after it.  If the
        separator is not found, return two empty strings and S.
        """
        pass

    def rsplit(self, sep=None, maxsplit=None):  # real signature unknown; restored from __doc__
        """
        S.rsplit([sep [,maxsplit]]) -> list of strings

        Return a list of the words in the string S, using sep as the
        delimiter string, starting at the end of the string and working
        to the front.  If maxsplit is given, at most maxsplit splits are
        done. If sep is not specified or is None, any whitespace string
        is a separator.
        """
        return []

    def rstrip(self, chars=None):  # real signature unknown; restored from __doc__
        """
        S.rstrip([chars]) -> string or unicode

        Return a copy of the string S with trailing whitespace removed.
        If chars is given and not None, remove characters in chars instead.
        If chars is unicode, S will be converted to unicode before stripping
        """
        return ""

    def split(self, sep=None, maxsplit=None):  # real signature unknown; restored from __doc__
        """
        S.split([sep [,maxsplit]]) -> list of strings

        Return a list of the words in the string S, using sep as the
        delimiter string.  If maxsplit is given, at most maxsplit
        splits are done. If sep is not specified or is None, any
        whitespace string is a separator and empty strings are removed
        from the result.
        """
        return []

    def splitlines(self, keepends=False):  # real signature unknown; restored from __doc__
        """
        S.splitlines(keepends=False) -> list of strings

        Return a list of the lines in S, breaking at line boundaries.
        Line breaks are not included in the resulting list unless keepends
        is given and true.
        """
        return []

    def startswith(self, prefix, start=None, end=None):  # real signature unknown; restored from __doc__
        """
        S.startswith(prefix[, start[, end]]) -> bool

        Return True if S starts with the specified prefix, False otherwise.
        With optional start, test S beginning at that position.
        With optional end, stop comparing S at that position.
        prefix can also be a tuple of strings to try.
        """
        return False

    def strip(self, chars=None):  # real signature unknown; restored from __doc__
        """
        S.strip([chars]) -> string or unicode

        Return a copy of the string S with leading and trailing
        whitespace removed.
        If chars is given and not None, remove characters in chars instead.
        If chars is unicode, S will be converted to unicode before stripping
        """
        return ""

    def swapcase(self):  # real signature unknown; restored from __doc__
        """
        S.swapcase() -> string

        Return a copy of the string S with uppercase characters
        converted to lowercase and vice versa.
        """
        return ""

    def title(self):  # real signature unknown; restored from __doc__
        """
        S.title() -> string

        Return a titlecased version of S, i.e. words start with uppercase
        characters, all remaining cased characters have lowercase.
        """
        return ""

    def translate(self, table, deletechars=None):  # real signature unknown; restored from __doc__
        """
        S.translate(table [,deletechars]) -> string

        Return a copy of the string S, where all characters occurring
        in the optional argument deletechars are removed, and the
        remaining characters have been mapped through the given
        translation table, which must be a string of length 256 or None.
        If the table argument is None, no translation is applied and
        the operation simply removes the characters in deletechars.
        """
        return ""

    def upper(self):  # real signature unknown; restored from __doc__
        """
        S.upper() -> string

        Return a copy of the string S converted to uppercase.
        """
        return ""

    def zfill(self, width):  # real signature unknown; restored from __doc__
        """
        S.zfill(width) -> string

        Pad a numeric string S with zeros on the left, to fill a field
        of the specified width.  The string S is never truncated.
        """
        return ""

    def _formatter_field_name_split(self, *args, **kwargs):  # real signature unknown
        pass

    def _formatter_parser(self, *args, **kwargs):  # real signature unknown
        pass

    def __add__(self, y):  # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass

    def __contains__(self, y):  # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x """
        pass

    def __eq__(self, y):  # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __format__(self, format_spec):  # real signature unknown; restored from __doc__
        """
        S.__format__(format_spec) -> string

        Return a formatted version of S as described by format_spec.
        """
        return ""

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass

    def __getnewargs__(self, *args, **kwargs):  # real signature unknown
        pass

    def __getslice__(self, i, j):  # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]

        Use of negative indices is not supported.
        """
        pass

    def __ge__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __hash__(self):  # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass

    def __init__(self, string=''):  # known special case of str.__init__
        """
        str(object='') -> string

        Return a nice string representation of the object.
        If the argument is a string, the return value is the same object.
        # (copied from class doc)
        """
        pass

    def __len__(self):  # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass

    def __le__(self, y):  # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    def __mod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass

    def __mul__(self, n):  # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __rmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass

    def __rmul__(self, n):  # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass

    def __sizeof__(self):  # real signature unknown; restored from __doc__
        """ S.__sizeof__() -> size of S in memory, in bytes """
        pass

    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass


# Python 2 alias: `bytes` and `str` are the same type there.
bytes = str
# NOTE(review): IDE-generated skeleton of the built-in `classmethod`
# descriptor; bodies are placeholders ("restored from __doc__").  Leading
# indentation was lost in the source and is restored here.
class classmethod(object):
    """
    classmethod(function) -> method

    Convert a function to be a class method.

    A class method receives the class as implicit first argument,
    just like an instance method receives the instance.
    To declare a class method, use this idiom:

        class C:
            def f(cls, arg1, arg2, ...): ...
            f = classmethod(f)

    It can be called either on the class (e.g. C.f()) or on an instance
    (e.g. C().f()).  The instance is ignored except for its class.
    If a class method is called for a derived class, the derived class
    object is passed as the implied first argument.

    Class methods are different than C++ or Java static methods.
    If you want those, see the staticmethod builtin.
    """

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __get__(self, obj, type=None):  # real signature unknown; restored from __doc__
        """ descr.__get__(obj[, type]) -> value """
        pass

    def __init__(self, function):  # real signature unknown; restored from __doc__
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    __func__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
# NOTE(review): IDE-generated skeleton of the built-in `complex` type; bodies
# are placeholders ("restored from __doc__").  Leading indentation was lost in
# the source and is restored here.
class complex(object):
    """
    complex(real[, imag]) -> complex number

    Create a complex number from a real part and an optional imaginary part.
    This is equivalent to (real + imag*1j) where imag defaults to 0.
    """

    def conjugate(self):  # real signature unknown; restored from __doc__
        """
        complex.conjugate() -> complex

        Return the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.
        """
        return complex

    def __abs__(self):  # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass

    def __add__(self, y):  # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass

    def __coerce__(self, y):  # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass

    def __divmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass

    def __div__(self, y):  # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass

    def __eq__(self, y):  # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __float__(self):  # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass

    def __floordiv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass

    def __format__(self):  # real signature unknown; restored from __doc__
        """
        complex.__format__() -> str

        Convert to a string according to format_spec.
        """
        return ""

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getnewargs__(self, *args, **kwargs):  # real signature unknown
        pass

    def __ge__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __hash__(self):  # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass

    def __init__(self, real, imag=None):  # real signature unknown; restored from __doc__
        pass

    def __int__(self):  # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass

    def __le__(self, y):  # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __long__(self):  # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass

    def __lt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    def __mod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass

    def __mul__(self, y):  # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass

    def __neg__(self):  # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __nonzero__(self):  # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass

    def __pos__(self):  # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass

    def __pow__(self, y, z=None):  # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass

    def __radd__(self, y):  # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass

    def __rdivmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rdivmod__(y) <==> divmod(y, x) """
        pass

    def __rdiv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __rfloordiv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rfloordiv__(y) <==> y//x """
        pass

    def __rmod__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass

    def __rmul__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass

    def __rpow__(self, x, z=None):  # real signature unknown; restored from __doc__
        """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
        pass

    def __rsub__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass

    def __rtruediv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass

    def __str__(self):  # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass

    def __sub__(self, y):  # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass

    def __truediv__(self, y):  # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass

    imag = property(lambda self: 0.0)
    """the imaginary part of a complex number
    :type: float
    """

    real = property(lambda self: 0.0)
    """the real part of a complex number
    :type: float
    """
# NOTE(review): IDE-generated skeleton of the built-in `dict` type (Python 2:
# includes has_key/iteritems/viewitems); bodies are placeholders ("restored
# from __doc__").  Leading indentation was lost in the source and is restored.
class dict(object):
    """
    dict() -> new empty dictionary
    dict(mapping) -> new dictionary initialized from a mapping object's
        (key, value) pairs
    dict(iterable) -> new dictionary initialized as if via:
        d = {}
        for k, v in iterable:
            d[k] = v
    dict(**kwargs) -> new dictionary initialized with the name=value pairs
        in the keyword argument list.  For example:  dict(one=1, two=2)
    """

    def clear(self):  # real signature unknown; restored from __doc__
        """ D.clear() -> None.  Remove all items from D. """
        pass

    def copy(self):  # real signature unknown; restored from __doc__
        """ D.copy() -> a shallow copy of D """
        pass

    @staticmethod  # known case
    def fromkeys(S, v=None):  # real signature unknown; restored from __doc__
        """
        dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
        v defaults to None.
        """
        pass

    def get(self, k, d=None):  # real signature unknown; restored from __doc__
        """ D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None. """
        pass

    def has_key(self, k):  # real signature unknown; restored from __doc__
        """ D.has_key(k) -> True if D has a key k, else False """
        return False

    def items(self):  # real signature unknown; restored from __doc__
        """ D.items() -> list of D's (key, value) pairs, as 2-tuples """
        return []

    def iteritems(self):  # real signature unknown; restored from __doc__
        """ D.iteritems() -> an iterator over the (key, value) items of D """
        pass

    def iterkeys(self):  # real signature unknown; restored from __doc__
        """ D.iterkeys() -> an iterator over the keys of D """
        pass

    def itervalues(self):  # real signature unknown; restored from __doc__
        """ D.itervalues() -> an iterator over the values of D """
        pass

    def keys(self):  # real signature unknown; restored from __doc__
        """ D.keys() -> list of D's keys """
        return []

    def pop(self, k, d=None):  # real signature unknown; restored from __doc__
        """
        D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised
        """
        pass

    def popitem(self):  # real signature unknown; restored from __doc__
        """
        D.popitem() -> (k, v), remove and return some (key, value) pair as a
        2-tuple; but raise KeyError if D is empty.
        """
        pass

    def setdefault(self, k, d=None):  # real signature unknown; restored from __doc__
        """ D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """
        pass

    def update(self, E=None, **F):  # known special case of dict.update
        """
        D.update([E, ]**F) -> None.  Update D from dict/iterable E and F.
        If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
        In either case, this is followed by: for k in F: D[k] = F[k]
        """
        pass

    def values(self):  # real signature unknown; restored from __doc__
        """ D.values() -> list of D's values """
        return []

    def viewitems(self):  # real signature unknown; restored from __doc__
        """ D.viewitems() -> a set-like object providing a view on D's items """
        pass

    def viewkeys(self):  # real signature unknown; restored from __doc__
        """ D.viewkeys() -> a set-like object providing a view on D's keys """
        pass

    def viewvalues(self):  # real signature unknown; restored from __doc__
        """ D.viewvalues() -> an object providing a view on D's values """
        pass

    def __cmp__(self, y):  # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass

    def __contains__(self, k):  # real signature unknown; restored from __doc__
        """ D.__contains__(k) -> True if D has a key k, else False """
        return False

    def __delitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass

    def __eq__(self, y):  # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass

    def __ge__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __init__(self, seq=None, **kwargs):  # known special case of dict.__init__
        """
        dict() -> new empty dictionary
        dict(mapping) -> new dictionary initialized from a mapping object's
            (key, value) pairs
        dict(iterable) -> new dictionary initialized as if via:
            d = {}
            for k, v in iterable:
                d[k] = v
        dict(**kwargs) -> new dictionary initialized with the name=value pairs
            in the keyword argument list.  For example:  dict(one=1, two=2)
        # (copied from class doc)
        """
        pass

    def __iter__(self):  # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass

    def __len__(self):  # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass

    def __le__(self, y):  # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __setitem__(self, i, y):  # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass

    def __sizeof__(self):  # real signature unknown; restored from __doc__
        """ D.__sizeof__() -> size of D in memory, in bytes """
        pass

    # dicts are unhashable (mutable); the real type also sets this to None.
    __hash__ = None
# NOTE(review): IDE-generated skeleton of the built-in `enumerate` iterator
# (Python 2: has a `next` method); bodies are placeholders ("restored from
# __doc__").  Leading indentation was lost in the source and is restored here.
class enumerate(object):
    """
    enumerate(iterable[, start]) -> iterator for index, value of iterable

    Return an enumerate object.  iterable must be another object that supports
    iteration.  The enumerate object yields pairs containing a count (from
    start, which defaults to zero) and a value yielded by the iterable argument.
    enumerate is useful for obtaining an indexed list:
        (0, seq[0]), (1, seq[1]), (2, seq[2]), ...
    """

    def next(self):  # real signature unknown; restored from __doc__
        """ x.next() -> the next value, or raise StopIteration """
        pass

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __init__(self, iterable, start=0):  # known special case of enumerate.__init__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass

    def __iter__(self):  # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class file(object):
"""
file(name[, mode[, buffering]]) -> file object
Open a file. The mode can be 'r', 'w' or 'a' for reading (default),
writing or appending. The file will be created if it doesn't exist
when opened for writing or appending; it will be truncated when
opened for writing. Add a 'b' to the mode for binary files.
Add a '+' to the mode to allow simultaneous reading and writing.
If the buffering argument is given, 0 means unbuffered, 1 means line
buffered, and larger numbers specify the buffer size. The preferred way
to open a file is with the builtin open() function.
Add a 'U' to mode to open the file for input with universal newline
support. Any line ending in the input file will be seen as a '\n'
in Python. Also, a file so opened gains the attribute 'newlines';
the value for this attribute is one of None (no newline read yet),
'\r', '\n', '\r\n' or a tuple containing all the newline types seen.
'U' cannot be combined with 'w' or '+' mode.
"""
def close(self): # real signature unknown; restored from __doc__
"""
close() -> None or (perhaps) an integer. Close the file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
pass
def fileno(self): # real signature unknown; restored from __doc__
"""
fileno() -> integer "file descriptor".
This is needed for lower-level file interfaces, such os.read().
"""
return 0
def flush(self): # real signature unknown; restored from __doc__
""" flush() -> None. Flush the internal I/O buffer. """
pass
def isatty(self): # real signature unknown; restored from __doc__
""" isatty() -> true or false. True if the file is connected to a tty device. """
return False
def next(self): # real signature unknown; restored from __doc__
""" x.next() -> the next value, or raise StopIteration """
pass
def read(self, size=None): # real signature unknown; restored from __doc__
"""
read([size]) -> read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was requested
may be returned, even if no size parameter was given.
"""
pass
def readinto(self): # real signature unknown; restored from __doc__
""" readinto() -> Undocumented. Don't use this; it may go away. """
pass
def readline(self, size=None): # real signature unknown; restored from __doc__
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
pass
def readlines(self, size=None): # real signature unknown; restored from __doc__
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
return []
    def seek(self, offset, whence=None): # real signature unknown; restored from __doc__
        """
        seek(offset[, whence]) -> None.  Move to new file position.
        Argument offset is a byte count.  Optional argument whence defaults to
        0 (offset from start of file, offset should be >= 0); other values are 1
        (move relative to current position, positive or negative), and 2 (move
        relative to end of file, usually negative, although many platforms allow
        seeking beyond the end of a file).  If the file is opened in text mode,
        only offsets returned by tell() are legal.  Use of other offsets causes
        undefined behavior.
        Note that not all file objects are seekable.
        """
        # Generated skeleton stub: the real implementation is in the C interpreter.
        pass
    def tell(self): # real signature unknown; restored from __doc__
        """ tell() -> current file position, an integer (may be a long integer). """
        # Generated skeleton stub: the real implementation is in the C interpreter.
        pass
    def truncate(self, size=None): # real signature unknown; restored from __doc__
        """
        truncate([size]) -> None.  Truncate the file to at most size bytes.
        Size defaults to the current file position, as returned by tell().
        """
        # Generated skeleton stub: the real implementation is in the C interpreter.
        pass
    def write(self, p_str): # real signature unknown; restored from __doc__
        """
        write(str) -> None.  Write string str to file.
        Note that due to buffering, flush() or close() may be needed before
        the file on disk reflects the data written.
        """
        # Generated skeleton stub: the real implementation is in the C interpreter.
        pass
    def writelines(self, sequence_of_strings): # real signature unknown; restored from __doc__
        """
        writelines(sequence_of_strings) -> None.  Write the strings to the file.
        Note that newlines are not added.  The sequence can be any iterable object
        producing strings. This is equivalent to calling write() for each string.
        """
        # Generated skeleton stub: the real implementation is in the C interpreter.
        pass
    def xreadlines(self): # real signature unknown; restored from __doc__
        """
        xreadlines() -> returns self.
        For backward compatibility. File objects now include the performance
        optimizations previously implemented in the xreadlines module.
        """
        # Generated skeleton stub: legacy Python 2 API kept for completeness.
        pass
    def __delattr__(self, name): # real signature unknown; restored from __doc__
        """ x.__delattr__('name') <==> del x.name """
        # Generated skeleton stub for the attribute-deletion hook.
        pass
    def __enter__(self): # real signature unknown; restored from __doc__
        """ __enter__() -> self. """
        # Context-manager entry: file objects return themselves.
        return self
    def __exit__(self, *excinfo): # real signature unknown; restored from __doc__
        """ __exit__(*excinfo) -> None.  Closes the file. """
        # Context-manager exit stub; the real built-in closes the file here.
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        # Generated skeleton stub for the attribute-access hook.
        pass
    def __init__(self, name, mode=None, buffering=None): # real signature unknown; restored from __doc__
        # Generated skeleton stub; signature mirrors file(name[, mode[, buffering]]).
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        # Generated skeleton stub for the iteration protocol.
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        # Generated skeleton stub for the constructor hook.
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        # Generated skeleton stub for repr().
        pass
    def __setattr__(self, name, value): # real signature unknown; restored from __doc__
        """ x.__setattr__('name', value) <==> x.name = value """
        # Generated skeleton stub for the attribute-assignment hook.
        pass
closed = property(lambda self: True)
"""True if the file is closed
:type: bool
"""
encoding = property(lambda self: '')
"""file encoding
:type: string
"""
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Unicode error handler"""
mode = property(lambda self: '')
"""file mode ('r', 'U', 'w', 'a', possibly with 'b' or '+' added)
:type: string
"""
name = property(lambda self: '')
"""file name
:type: string
"""
newlines = property(lambda self: '')
"""end-of-line convention used in this file
:type: string
"""
softspace = property(lambda self: True)
"""flag indicating that a space needs to be printed; used by print
:type: bool
"""
class float(object):
    """
    float(x) -> floating point number
    Convert a string or number to a floating point number, if possible.
    """
    # Auto-generated IDE skeleton of the built-in float type; method bodies are
    # placeholders and the real implementation lives in the C interpreter.

    def as_integer_ratio(self): # real signature unknown; restored from __doc__
        """
        float.as_integer_ratio() -> (int, int)
        Return a pair of integers, whose ratio is exactly equal to the original
        float and with a positive denominator.
        Raise OverflowError on infinities and a ValueError on NaNs.
        >>> (10.0).as_integer_ratio()
        (10, 1)
        >>> (0.0).as_integer_ratio()
        (0, 1)
        >>> (-.25).as_integer_ratio()
        (-1, 4)
        """
        pass

    def conjugate(self, *args, **kwargs): # real signature unknown
        """ Return self, the complex conjugate of any float. """
        pass

    def fromhex(self, string): # real signature unknown; restored from __doc__
        """
        float.fromhex(string) -> float
        Create a floating-point number from a hexadecimal string.
        >>> float.fromhex('0x1.ffffp10')
        2047.984375
        >>> float.fromhex('-0x1p-1074')
        -4.9406564584124654e-324
        """
        return 0.0

    def hex(self): # real signature unknown; restored from __doc__
        """
        float.hex() -> string
        Return a hexadecimal representation of a floating-point number.
        >>> (-0.1).hex()
        '-0x1.999999999999ap-4'
        >>> 3.14159.hex()
        '0x1.921f9f01b866ep+1'
        """
        return ""

    def is_integer(self, *args, **kwargs): # real signature unknown
        """ Return True if the float is an integer. """
        pass

    # --- numeric protocol stubs (arithmetic, comparison, conversion) ---

    def __abs__(self): # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass

    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass

    def __coerce__(self, y): # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass

    def __divmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass

    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass

    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __float__(self): # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass

    def __floordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass

    def __format__(self, format_spec): # real signature unknown; restored from __doc__
        """
        float.__format__(format_spec) -> string
        Formats the float according to format_spec.
        """
        return ""

    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getformat__(self, typestr): # real signature unknown; restored from __doc__
        """
        float.__getformat__(typestr) -> string
        You probably don't want to use this function.  It exists mainly to be
        used in Python's test suite.
        typestr must be 'double' or 'float'.  This function returns whichever of
        'unknown', 'IEEE, big-endian' or 'IEEE, little-endian' best describes the
        format of floating point numbers used by the C type named by typestr.
        """
        return ""

    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass

    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass

    def __init__(self, x): # real signature unknown; restored from __doc__
        pass

    def __int__(self): # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass

    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __long__(self): # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass

    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    def __mod__(self, y): # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass

    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass

    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass

    def __pos__(self): # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass

    def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass

    # --- reflected (right-hand) operand variants ---

    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass

    def __rdivmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdivmod__(y) <==> divmod(y, x) """
        pass

    def __rdiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass

    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rfloordiv__(y) <==> y//x """
        pass

    def __rmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass

    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass

    def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
        """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
        pass

    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass

    def __rtruediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass

    def __setformat__(self, typestr, fmt): # real signature unknown; restored from __doc__
        """
        float.__setformat__(typestr, fmt) -> None
        You probably don't want to use this function.  It exists mainly to be
        used in Python's test suite.
        typestr must be 'double' or 'float'.  fmt must be one of 'unknown',
        'IEEE, big-endian' or 'IEEE, little-endian', and in addition can only be
        one of the latter two if it appears to match the underlying C reality.
        Override the automatic determination of C-level floating point type.
        This affects how floats are converted to and from binary strings.
        """
        pass

    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass

    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass

    def __truediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass

    def __trunc__(self, *args, **kwargs): # real signature unknown
        """ Return the Integral closest to x between 0 and x. """
        pass

    imag = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the imaginary part of a complex number"""

    real = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the real part of a complex number"""
class frozenset(object):
    """
    frozenset() -> empty frozenset object
    frozenset(iterable) -> frozenset object
    Build an immutable unordered collection of unique elements.
    """
    # Auto-generated IDE skeleton of the built-in frozenset type; method bodies
    # are placeholders and the real implementation lives in the C interpreter.

    def copy(self, *args, **kwargs): # real signature unknown
        """ Return a shallow copy of a set. """
        pass

    def difference(self, *args, **kwargs): # real signature unknown
        """
        Return the difference of two or more sets as a new set.
        (i.e. all elements that are in this set but not the others.)
        """
        pass

    def intersection(self, *args, **kwargs): # real signature unknown
        """
        Return the intersection of two or more sets as a new set.
        (i.e. elements that are common to all of the sets.)
        """
        pass

    def isdisjoint(self, *args, **kwargs): # real signature unknown
        """ Return True if two sets have a null intersection. """
        pass

    def issubset(self, *args, **kwargs): # real signature unknown
        """ Report whether another set contains this set. """
        pass

    def issuperset(self, *args, **kwargs): # real signature unknown
        """ Report whether this set contains another set. """
        pass

    def symmetric_difference(self, *args, **kwargs): # real signature unknown
        """
        Return the symmetric difference of two sets as a new set.
        (i.e. all elements that are in exactly one of the sets.)
        """
        pass

    def union(self, *args, **kwargs): # real signature unknown
        """
        Return the union of sets as a new set.
        (i.e. all elements that are in either set.)
        """
        pass

    # --- operator-protocol stubs (set algebra and comparisons) ---

    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass

    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass

    def __contains__(self, y): # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x. """
        pass

    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass

    def __init__(self, seq=()): # known special case of frozenset.__init__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass

    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass

    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass

    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass

    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass

    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __ror__(self, y): # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass

    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass

    def __rxor__(self, y): # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass

    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ S.__sizeof__() -> size of S in memory, in bytes """
        pass

    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass

    def __xor__(self, y): # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass
class list(object):
    """
    list() -> new empty list
    list(iterable) -> new list initialized from iterable's items
    """
    # Auto-generated IDE skeleton of the built-in list type; method bodies are
    # placeholders and the real implementation lives in the C interpreter.

    def append(self, p_object): # real signature unknown; restored from __doc__
        """ L.append(object) -- append object to end """
        pass

    def count(self, value): # real signature unknown; restored from __doc__
        """ L.count(value) -> integer -- return number of occurrences of value """
        return 0

    def extend(self, iterable): # real signature unknown; restored from __doc__
        """ L.extend(iterable) -- extend list by appending elements from the iterable """
        pass

    def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__
        """
        L.index(value, [start, [stop]]) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        """
        return 0

    def insert(self, index, p_object): # real signature unknown; restored from __doc__
        """ L.insert(index, object) -- insert object before index """
        pass

    def pop(self, index=None): # real signature unknown; restored from __doc__
        """
        L.pop([index]) -> item -- remove and return item at index (default last).
        Raises IndexError if list is empty or index is out of range.
        """
        pass

    def remove(self, value): # real signature unknown; restored from __doc__
        """
        L.remove(value) -- remove first occurrence of value.
        Raises ValueError if the value is not present.
        """
        pass

    def reverse(self): # real signature unknown; restored from __doc__
        """ L.reverse() -- reverse *IN PLACE* """
        pass

    def sort(self, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__
        """
        L.sort(cmp=None, key=None, reverse=False) -- stable sort *IN PLACE*;
        cmp(x, y) -> -1, 0, 1
        """
        pass

    # --- sequence-protocol stubs ---

    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass

    def __contains__(self, y): # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x """
        pass

    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass

    def __delslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__delslice__(i, j) <==> del x[i:j]
        Use of negative indices is not supported.
        """
        pass

    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass

    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass

    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __iadd__(self, y): # real signature unknown; restored from __doc__
        """ x.__iadd__(y) <==> x+=y """
        pass

    def __imul__(self, y): # real signature unknown; restored from __doc__
        """ x.__imul__(y) <==> x*=y """
        pass

    def __init__(self, seq=()): # known special case of list.__init__
        """
        list() -> new empty list
        list(iterable) -> new list initialized from iterable's items
        # (copied from class doc)
        """
        pass

    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass

    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass

    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    def __mul__(self, n): # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __reversed__(self): # real signature unknown; restored from __doc__
        """ L.__reversed__() -- return a reverse iterator over the list """
        pass

    def __rmul__(self, n): # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass

    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass

    def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__
        """
        x.__setslice__(i, j, y) <==> x[i:j]=y
        Use of negative indices is not supported.
        """
        pass

    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ L.__sizeof__() -- size of L in memory, in bytes """
        pass

    # Lists are mutable and therefore unhashable.
    __hash__ = None
class long(object):
    """
    long(x=0) -> long
    long(x, base=10) -> long
    Convert a number or string to a long integer, or return 0L if no arguments
    are given.  If x is floating point, the conversion truncates towards zero.
    If x is not a number or if base is given, then x must be a string or
    Unicode object representing an integer literal in the given base.  The
    literal can be preceded by '+' or '-' and be surrounded by whitespace.
    The base defaults to 10.  Valid bases are 0 and 2-36.  Base 0 means to
    interpret the base from the string as an integer literal.
    >>> int('0b100', base=0)
    4L
    """
    # Auto-generated IDE skeleton of the Python 2 built-in long type; method
    # bodies are placeholders and the real implementation lives in the C
    # interpreter.

    def bit_length(self): # real signature unknown; restored from __doc__
        """
        long.bit_length() -> int or long
        Number of bits necessary to represent self in binary.
        >>> bin(37L)
        '0b100101'
        >>> (37L).bit_length()
        6
        """
        return 0

    def conjugate(self, *args, **kwargs): # real signature unknown
        """ Returns self, the complex conjugate of any long. """
        pass

    # --- numeric protocol stubs (arithmetic, bitwise, comparison, conversion) ---

    def __abs__(self): # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass

    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass

    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass

    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass

    def __coerce__(self, y): # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass

    def __divmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass

    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass

    def __float__(self): # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass

    def __floordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass

    def __format__(self, *args, **kwargs): # real signature unknown
        pass

    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass

    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass

    def __hex__(self): # real signature unknown; restored from __doc__
        """ x.__hex__() <==> hex(x) """
        pass

    def __index__(self): # real signature unknown; restored from __doc__
        """ x[y:z] <==> x[y.__index__():z.__index__()] """
        pass

    def __init__(self, x=0): # real signature unknown; restored from __doc__
        pass

    def __int__(self): # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass

    def __invert__(self): # real signature unknown; restored from __doc__
        """ x.__invert__() <==> ~x """
        pass

    def __long__(self): # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass

    def __lshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__lshift__(y) <==> x<<y """
        pass

    def __mod__(self, y): # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass

    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass

    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass

    def __oct__(self): # real signature unknown; restored from __doc__
        """ x.__oct__() <==> oct(x) """
        pass

    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass

    def __pos__(self): # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass

    def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass

    # --- reflected (right-hand) operand variants ---

    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass

    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass

    def __rdivmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdivmod__(y) <==> divmod(y, x) """
        pass

    def __rdiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass

    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rfloordiv__(y) <==> y//x """
        pass

    def __rlshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rlshift__(y) <==> y<<x """
        pass

    def __rmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass

    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass

    def __ror__(self, y): # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass

    def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
        """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
        pass

    def __rrshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rrshift__(y) <==> y>>x """
        pass

    def __rshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rshift__(y) <==> x>>y """
        pass

    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass

    def __rtruediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass

    def __rxor__(self, y): # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass

    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Returns size in memory, in bytes """
        pass

    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass

    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass

    def __truediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass

    def __trunc__(self, *args, **kwargs): # real signature unknown
        """ Truncating an Integral returns itself. """
        pass

    def __xor__(self, y): # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass

    denominator = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the denominator of a rational number in lowest terms"""

    imag = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the imaginary part of a complex number"""

    numerator = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the numerator of a rational number in lowest terms"""

    real = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the real part of a complex number"""
class memoryview(object):
    """
    memoryview(object)
    Create a new memoryview object which references the given object.
    """
    # Auto-generated IDE skeleton of the built-in memoryview type; method
    # bodies are placeholders and the real implementation lives in the C
    # interpreter.

    def tobytes(self, *args, **kwargs): # real signature unknown
        pass

    def tolist(self, *args, **kwargs): # real signature unknown
        pass

    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass

    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass

    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass

    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __init__(self, p_object): # real signature unknown; restored from __doc__
        pass

    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass

    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass

    # Buffer-description attribute stubs (real values come from the C object).
    format = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    ndim = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    readonly = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    shape = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    strides = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    suboffsets = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
class property(object):
"""
property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
fget is a function to be used for getting an attribute value, and likewise
fset is a function for setting, and fdel a function for del'ing, an
attribute. Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
"""
def deleter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the deleter on a property. """
pass
def getter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the getter on a property. """
pass
def setter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the setter on a property. """
pass
def __delete__(self, obj): # real signature unknown; restored from __doc__
""" descr.__delete__(obj) """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
""" descr.__get__(obj[, type]) -> value """
pass
def __init__(self, fget=None, fset=None, fdel=None, doc=None): # known special case of property.__init__
"""
property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
fget is a function to be used for getting an attribute value, and likewise
fset is a function for setting, and fdel a function for del'ing, an
attribute. Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
# (copied from class doc)
"""
pass
    # Generic object allocator stub.
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Descriptor protocol: invoked by `obj.attr = value`; delegates to fset.
    def __set__(self, obj, value): # real signature unknown; restored from __doc__
        """ descr.__set__(obj, value) """
        pass
fdel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fget = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# Skeleton of the built-in reversed-iterator type; bodies are IDE placeholders.
class reversed(object):
    """
    reversed(sequence) -> reverse iterator over values of the sequence
    Return a reverse iterator
    """
    def next(self): # real signature unknown; restored from __doc__
        """ x.next() -> the next value, or raise StopIteration """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __init__(self, sequence): # real signature unknown; restored from __doc__
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __length_hint__(self, *args, **kwargs): # real signature unknown
        """ Private method returning an estimate of len(list(it)). """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
# Skeleton of the built-in set type; method bodies are placeholders for the
# C implementation and exist only for IDE/static-analysis support.
class set(object):
    """
    set() -> new empty set object
    set(iterable) -> new set object
    Build an unordered collection of unique elements.
    """
    def add(self, *args, **kwargs): # real signature unknown
        """
        Add an element to a set.
        This has no effect if the element is already present.
        """
        pass
    def clear(self, *args, **kwargs): # real signature unknown
        """ Remove all elements from this set. """
        pass
    def copy(self, *args, **kwargs): # real signature unknown
        """ Return a shallow copy of a set. """
        pass
    def difference(self, *args, **kwargs): # real signature unknown
        """
        Return the difference of two or more sets as a new set.
        (i.e. all elements that are in this set but not the others.)
        """
        pass
    def difference_update(self, *args, **kwargs): # real signature unknown
        """ Remove all elements of another set from this set. """
        pass
    def discard(self, *args, **kwargs): # real signature unknown
        """
        Remove an element from a set if it is a member.
        If the element is not a member, do nothing.
        """
        pass
    def intersection(self, *args, **kwargs): # real signature unknown
        """
        Return the intersection of two or more sets as a new set.
        (i.e. elements that are common to all of the sets.)
        """
        pass
    def intersection_update(self, *args, **kwargs): # real signature unknown
        """ Update a set with the intersection of itself and another. """
        pass
    def isdisjoint(self, *args, **kwargs): # real signature unknown
        """ Return True if two sets have a null intersection. """
        pass
    def issubset(self, *args, **kwargs): # real signature unknown
        """ Report whether another set contains this set. """
        pass
    def issuperset(self, *args, **kwargs): # real signature unknown
        """ Report whether this set contains another set. """
        pass
    def pop(self, *args, **kwargs): # real signature unknown
        """
        Remove and return an arbitrary set element.
        Raises KeyError if the set is empty.
        """
        pass
    def remove(self, *args, **kwargs): # real signature unknown
        """
        Remove an element from a set; it must be a member.
        If the element is not a member, raise a KeyError.
        """
        pass
    def symmetric_difference(self, *args, **kwargs): # real signature unknown
        """
        Return the symmetric difference of two sets as a new set.
        (i.e. all elements that are in exactly one of the sets.)
        """
        pass
    def symmetric_difference_update(self, *args, **kwargs): # real signature unknown
        """ Update a set with the symmetric difference of itself and another. """
        pass
    def union(self, *args, **kwargs): # real signature unknown
        """
        Return the union of sets as a new set.
        (i.e. all elements that are in either set.)
        """
        pass
    def update(self, *args, **kwargs): # real signature unknown
        """ Update a set with the union of itself and others. """
        pass
    # Operator protocol stubs: set algebra (&, |, -, ^ and in-place/reflected
    # forms), rich comparisons, containment, iteration and sizing.
    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __contains__(self, y): # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x. """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __iand__(self, y): # real signature unknown; restored from __doc__
        """ x.__iand__(y) <==> x&=y """
        pass
    def __init__(self, seq=()): # known special case of set.__init__
        """
        set() -> new empty set object
        set(iterable) -> new set object
        Build an unordered collection of unique elements.
        # (copied from class doc)
        """
        pass
    def __ior__(self, y): # real signature unknown; restored from __doc__
        """ x.__ior__(y) <==> x|=y """
        pass
    def __isub__(self, y): # real signature unknown; restored from __doc__
        """ x.__isub__(y) <==> x-=y """
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __ixor__(self, y): # real signature unknown; restored from __doc__
        """ x.__ixor__(y) <==> x^=y """
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __ror__(self, y): # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass
    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass
    def __rxor__(self, y): # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass
    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ S.__sizeof__() -> size of S in memory, in bytes """
        pass
    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass
    def __xor__(self, y): # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass
    # Sets are mutable, hence unhashable.
    __hash__ = None
# Skeleton of the built-in slice type; bodies are IDE placeholders.
class slice(object):
    """
    slice(stop)
    slice(start, stop[, step])
    Create a slice object. This is used for extended slicing (e.g. a[0:10:2]).
    """
    def indices(self, len): # real signature unknown; restored from __doc__
        """
        S.indices(len) -> (start, stop, stride)
        Assuming a sequence of length len, calculate the start and stop
        indices, and the stride length of the extended slice described by
        S. Out of bounds indices are clipped in a manner consistent with the
        handling of normal slices.
        """
        pass
    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self, stop): # real signature unknown; restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    # Read-only attribute placeholders; real values are the slice bounds.
    start = property(lambda self: 0)
    """:type: int"""
    step = property(lambda self: 0)
    """:type: int"""
    stop = property(lambda self: 0)
    """:type: int"""
# Skeleton of the built-in staticmethod descriptor; bodies are IDE placeholders.
class staticmethod(object):
    """
    staticmethod(function) -> method
    Convert a function to be a static method.
    A static method does not receive an implicit first argument.
    To declare a static method, use this idiom:
    class C:
    def f(arg1, arg2, ...): ...
    f = staticmethod(f)
    It can be called either on the class (e.g. C.f()) or on an instance
    (e.g. C().f()). The instance is ignored except for its class.
    Static methods in Python are similar to those found in Java or C++.
    For a more advanced concept, see the classmethod builtin.
    """
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
        """ descr.__get__(obj[, type]) -> value """
        pass
    def __init__(self, function): # real signature unknown; restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Placeholder for the wrapped callable.
    __func__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# Skeleton of the built-in super proxy type; bodies are IDE placeholders.
class super(object):
    """
    super(type, obj) -> bound super object; requires isinstance(obj, type)
    super(type) -> unbound super object
    super(type, type2) -> bound super object; requires issubclass(type2, type)
    Typical use to call a cooperative superclass method:
    class C(B):
    def meth(self, arg):
    super(C, self).meth(arg)
    """
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
        """ descr.__get__(obj[, type]) -> value """
        pass
    def __init__(self, type1, type2=None): # known special case of super.__init__
        """
        super(type, obj) -> bound super object; requires isinstance(obj, type)
        super(type) -> unbound super object
        super(type, type2) -> bound super object; requires issubclass(type2, type)
        Typical use to call a cooperative superclass method:
        class C(B):
        def meth(self, arg):
        super(C, self).meth(arg)
        # (copied from class doc)
        """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    # Introspection attribute placeholders for the bound super object.
    __self_class__ = property(lambda self: type(object))
    """the type of the instance invoking super(); may be None
    :type: type
    """
    __self__ = property(lambda self: type(object))
    """the instance invoking super(); may be None
    :type: type
    """
    __thisclass__ = property(lambda self: type(object))
    """the class invoking super()
    :type: type
    """
# Skeleton of the built-in tuple type; method bodies are IDE placeholders
# (stub `return 0` values only convey the return type to static analysis).
class tuple(object):
    """
    tuple() -> empty tuple
    tuple(iterable) -> tuple initialized from iterable's items
    If the argument is a tuple, the return value is the same object.
    """
    def count(self, value): # real signature unknown; restored from __doc__
        """ T.count(value) -> integer -- return number of occurrences of value """
        return 0
    def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__
        """
        T.index(value, [start, [stop]]) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        """
        return 0
    # Operator protocol stubs: concatenation, repetition, comparisons,
    # indexing/slicing, iteration, hashing.
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __contains__(self, y): # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self, seq=()): # known special case of tuple.__init__
        """
        tuple() -> empty tuple
        tuple(iterable) -> tuple initialized from iterable's items
        If the argument is a tuple, the return value is the same object.
        # (copied from class doc)
        """
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mul__(self, n): # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rmul__(self, n): # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass
    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ T.__sizeof__() -- size of T in memory, in bytes """
        pass
# Skeleton of the built-in type metaclass; method bodies are IDE placeholders
# and the trailing constants are interpreter-specific snapshot values.
class type(object):
    """
    type(object) -> the object's type
    type(name, bases, dict) -> a new type
    """
    def mro(self): # real signature unknown; restored from __doc__
        """
        mro() -> list
        return a type's method resolution order
        """
        return []
    def __call__(self, *more): # real signature unknown; restored from __doc__
        """ x.__call__(...) <==> x(...) """
        pass
    def __delattr__(self, name): # real signature unknown; restored from __doc__
        """ x.__delattr__('name') <==> del x.name """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(cls, what, bases=None, dict=None): # known special case of type.__init__
        """
        type(object) -> the object's type
        type(name, bases, dict) -> a new type
        # (copied from class doc)
        """
        pass
    def __instancecheck__(self): # real signature unknown; restored from __doc__
        """
        __instancecheck__() -> bool
        check if an object is an instance
        """
        return False
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __setattr__(self, name, value): # real signature unknown; restored from __doc__
        """ x.__setattr__('name', value) <==> x.name = value """
        pass
    def __subclasscheck__(self): # real signature unknown; restored from __doc__
        """
        __subclasscheck__() -> bool
        check if a class is a subclass
        """
        return False
    def __subclasses__(self): # real signature unknown; restored from __doc__
        """ __subclasses__() -> list of immediate subclasses """
        return []
    # Snapshot of type's C-level layout constants; values are specific to the
    # interpreter build the stub was generated from.
    __abstractmethods__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    __bases__ = (
        object,
    )
    __base__ = object
    __basicsize__ = 872
    __dictoffset__ = 264
    __dict__ = None # (!) real value is ''
    __flags__ = 2148423147
    __itemsize__ = 40
    __mro__ = (
        None, # (!) forward: type, real value is ''
        object,
    )
    __name__ = 'type'
    __weakrefoffset__ = 368
# Skeleton of the Python 2 unicode type; method bodies are IDE placeholders
# whose stub return values (u"", 0, False, []) convey the return type.
# FIX: ljust previously declared `-> int` / `return 0` — a propagated CPython 2
# docstring bug; ljust actually returns unicode, like rjust/center/zfill.
class unicode(basestring):
    """
    unicode(object='') -> unicode object
    unicode(string[, encoding[, errors]]) -> unicode object
    Create a new Unicode object from the given encoded string.
    encoding defaults to the current default string encoding.
    errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
    """
    def capitalize(self): # real signature unknown; restored from __doc__
        """
        S.capitalize() -> unicode
        Return a capitalized version of S, i.e. make the first character
        have upper case and the rest lower case.
        """
        return u""
    def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
        """
        S.center(width[, fillchar]) -> unicode
        Return S centered in a Unicode string of length width. Padding is
        done using the specified fill character (default is a space)
        """
        return u""
    def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.count(sub[, start[, end]]) -> int
        Return the number of non-overlapping occurrences of substring sub in
        Unicode string S[start:end]. Optional arguments start and end are
        interpreted as in slice notation.
        """
        return 0
    def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
        """
        S.decode([encoding[,errors]]) -> string or unicode
        Decodes S using the codec registered for encoding. encoding defaults
        to the default encoding. errors may be given to set a different error
        handling scheme. Default is 'strict' meaning that encoding errors raise
        a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
        as well as any other name registered with codecs.register_error that is
        able to handle UnicodeDecodeErrors.
        """
        return ""
    def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
        """
        S.encode([encoding[,errors]]) -> string or unicode
        Encodes S using the codec registered for encoding. encoding defaults
        to the default encoding. errors may be given to set a different error
        handling scheme. Default is 'strict' meaning that encoding errors raise
        a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
        'xmlcharrefreplace' as well as any other name registered with
        codecs.register_error that can handle UnicodeEncodeErrors.
        """
        return ""
    def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.endswith(suffix[, start[, end]]) -> bool
        Return True if S ends with the specified suffix, False otherwise.
        With optional start, test S beginning at that position.
        With optional end, stop comparing S at that position.
        suffix can also be a tuple of strings to try.
        """
        return False
    def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
        """
        S.expandtabs([tabsize]) -> unicode
        Return a copy of S where all tab characters are expanded using spaces.
        If tabsize is not given, a tab size of 8 characters is assumed.
        """
        return u""
    def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.find(sub [,start [,end]]) -> int
        Return the lowest index in S where substring sub is found,
        such that sub is contained within S[start:end]. Optional
        arguments start and end are interpreted as in slice notation.
        Return -1 on failure.
        """
        return 0
    def format(*args, **kwargs): # known special case of unicode.format
        """
        S.format(*args, **kwargs) -> unicode
        Return a formatted version of S, using substitutions from args and kwargs.
        The substitutions are identified by braces ('{' and '}').
        """
        pass
    def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.index(sub [,start [,end]]) -> int
        Like S.find() but raise ValueError when the substring is not found.
        """
        return 0
    def isalnum(self): # real signature unknown; restored from __doc__
        """
        S.isalnum() -> bool
        Return True if all characters in S are alphanumeric
        and there is at least one character in S, False otherwise.
        """
        return False
    def isalpha(self): # real signature unknown; restored from __doc__
        """
        S.isalpha() -> bool
        Return True if all characters in S are alphabetic
        and there is at least one character in S, False otherwise.
        """
        return False
    def isdecimal(self): # real signature unknown; restored from __doc__
        """
        S.isdecimal() -> bool
        Return True if there are only decimal characters in S,
        False otherwise.
        """
        return False
    def isdigit(self): # real signature unknown; restored from __doc__
        """
        S.isdigit() -> bool
        Return True if all characters in S are digits
        and there is at least one character in S, False otherwise.
        """
        return False
    def islower(self): # real signature unknown; restored from __doc__
        """
        S.islower() -> bool
        Return True if all cased characters in S are lowercase and there is
        at least one cased character in S, False otherwise.
        """
        return False
    def isnumeric(self): # real signature unknown; restored from __doc__
        """
        S.isnumeric() -> bool
        Return True if there are only numeric characters in S,
        False otherwise.
        """
        return False
    def isspace(self): # real signature unknown; restored from __doc__
        """
        S.isspace() -> bool
        Return True if all characters in S are whitespace
        and there is at least one character in S, False otherwise.
        """
        return False
    def istitle(self): # real signature unknown; restored from __doc__
        """
        S.istitle() -> bool
        Return True if S is a titlecased string and there is at least one
        character in S, i.e. upper- and titlecase characters may only
        follow uncased characters and lowercase characters only cased ones.
        Return False otherwise.
        """
        return False
    def isupper(self): # real signature unknown; restored from __doc__
        """
        S.isupper() -> bool
        Return True if all cased characters in S are uppercase and there is
        at least one cased character in S, False otherwise.
        """
        return False
    def join(self, iterable): # real signature unknown; restored from __doc__
        """
        S.join(iterable) -> unicode
        Return a string which is the concatenation of the strings in the
        iterable. The separator between elements is S.
        """
        return u""
    def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
        """
        S.ljust(width[, fillchar]) -> unicode
        Return S left-justified in a Unicode string of length width. Padding is
        done using the specified fill character (default is a space).
        """
        # Corrected: ljust returns a unicode string (the old `-> int`/`return 0`
        # came from a known CPython 2 docstring typo).
        return u""
    def lower(self): # real signature unknown; restored from __doc__
        """
        S.lower() -> unicode
        Return a copy of the string S converted to lowercase.
        """
        return u""
    def lstrip(self, chars=None): # real signature unknown; restored from __doc__
        """
        S.lstrip([chars]) -> unicode
        Return a copy of the string S with leading whitespace removed.
        If chars is given and not None, remove characters in chars instead.
        If chars is a str, it will be converted to unicode before stripping
        """
        return u""
    def partition(self, sep): # real signature unknown; restored from __doc__
        """
        S.partition(sep) -> (head, sep, tail)
        Search for the separator sep in S, and return the part before it,
        the separator itself, and the part after it. If the separator is not
        found, return S and two empty strings.
        """
        pass
    def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
        """
        S.replace(old, new[, count]) -> unicode
        Return a copy of S with all occurrences of substring
        old replaced by new. If the optional argument count is
        given, only the first count occurrences are replaced.
        """
        return u""
    def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.rfind(sub [,start [,end]]) -> int
        Return the highest index in S where substring sub is found,
        such that sub is contained within S[start:end]. Optional
        arguments start and end are interpreted as in slice notation.
        Return -1 on failure.
        """
        return 0
    def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.rindex(sub [,start [,end]]) -> int
        Like S.rfind() but raise ValueError when the substring is not found.
        """
        return 0
    def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
        """
        S.rjust(width[, fillchar]) -> unicode
        Return S right-justified in a Unicode string of length width. Padding is
        done using the specified fill character (default is a space).
        """
        return u""
    def rpartition(self, sep): # real signature unknown; restored from __doc__
        """
        S.rpartition(sep) -> (head, sep, tail)
        Search for the separator sep in S, starting at the end of S, and return
        the part before it, the separator itself, and the part after it. If the
        separator is not found, return two empty strings and S.
        """
        pass
    def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
        """
        S.rsplit([sep [,maxsplit]]) -> list of strings
        Return a list of the words in S, using sep as the
        delimiter string, starting at the end of the string and
        working to the front. If maxsplit is given, at most maxsplit
        splits are done. If sep is not specified, any whitespace string
        is a separator.
        """
        return []
    def rstrip(self, chars=None): # real signature unknown; restored from __doc__
        """
        S.rstrip([chars]) -> unicode
        Return a copy of the string S with trailing whitespace removed.
        If chars is given and not None, remove characters in chars instead.
        If chars is a str, it will be converted to unicode before stripping
        """
        return u""
    def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
        """
        S.split([sep [,maxsplit]]) -> list of strings
        Return a list of the words in S, using sep as the
        delimiter string. If maxsplit is given, at most maxsplit
        splits are done. If sep is not specified or is None, any
        whitespace string is a separator and empty strings are
        removed from the result.
        """
        return []
    def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
        """
        S.splitlines(keepends=False) -> list of strings
        Return a list of the lines in S, breaking at line boundaries.
        Line breaks are not included in the resulting list unless keepends
        is given and true.
        """
        return []
    def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
        """
        S.startswith(prefix[, start[, end]]) -> bool
        Return True if S starts with the specified prefix, False otherwise.
        With optional start, test S beginning at that position.
        With optional end, stop comparing S at that position.
        prefix can also be a tuple of strings to try.
        """
        return False
    def strip(self, chars=None): # real signature unknown; restored from __doc__
        """
        S.strip([chars]) -> unicode
        Return a copy of the string S with leading and trailing
        whitespace removed.
        If chars is given and not None, remove characters in chars instead.
        If chars is a str, it will be converted to unicode before stripping
        """
        return u""
    def swapcase(self): # real signature unknown; restored from __doc__
        """
        S.swapcase() -> unicode
        Return a copy of S with uppercase characters converted to lowercase
        and vice versa.
        """
        return u""
    def title(self): # real signature unknown; restored from __doc__
        """
        S.title() -> unicode
        Return a titlecased version of S, i.e. words start with title case
        characters, all remaining cased characters have lower case.
        """
        return u""
    def translate(self, table): # real signature unknown; restored from __doc__
        """
        S.translate(table) -> unicode
        Return a copy of the string S, where all characters have been mapped
        through the given translation table, which must be a mapping of
        Unicode ordinals to Unicode ordinals, Unicode strings or None.
        Unmapped characters are left untouched. Characters mapped to None
        are deleted.
        """
        return u""
    def upper(self): # real signature unknown; restored from __doc__
        """
        S.upper() -> unicode
        Return a copy of S converted to uppercase.
        """
        return u""
    def zfill(self, width): # real signature unknown; restored from __doc__
        """
        S.zfill(width) -> unicode
        Pad a numeric string S with zeros on the left, to fill a field
        of the specified width. The string S is never truncated.
        """
        return u""
    # Private helpers used by the str.format machinery.
    def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
        pass
    def _formatter_parser(self, *args, **kwargs): # real signature unknown
        pass
    # Operator protocol stubs: concatenation, %-formatting, repetition,
    # comparisons, indexing/slicing, iteration, hashing.
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __contains__(self, y): # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self, format_spec): # real signature unknown; restored from __doc__
        """
        S.__format__(format_spec) -> unicode
        Return a formatted version of S as described by format_spec.
        """
        return u""
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self, string=u'', encoding=None, errors='strict'): # known special case of unicode.__init__
        """
        unicode(object='') -> unicode object
        unicode(string[, encoding[, errors]]) -> unicode object
        Create a new Unicode object from the given encoded string.
        encoding defaults to the current default string encoding.
        errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
        # (copied from class doc)
        """
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mod__(self, y): # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass
    def __mul__(self, n): # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass
    def __rmul__(self, n): # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass
    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ S.__sizeof__() -> size of S in memory, in bytes """
        pass
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
class xrange(object):
    """
    xrange(stop) -> xrange object
    xrange(start, stop[, step]) -> xrange object
    Like range(), but instead of returning a list, returns an object that
    generates the numbers in the range on demand. For looping, this is
    slightly faster than range() and more memory efficient.
    """
    # NOTE(review): this is an auto-generated IDE skeleton; every `pass`
    # body is a placeholder and the real implementation lives in the
    # CPython runtime. Do not add logic here.
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __init__(self, stop): # real signature unknown; restored from __doc__
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __reversed__(self, *args, **kwargs): # real signature unknown
        """ Returns a reverse iterator. """
        pass
# variables with complex values
Ellipsis = None # (!) real value is ''
NotImplemented = None # (!) real value is ''
| apache-2.0 |
wangqi/facebookads | python/facebookads/api.py | 1 | 16012 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
api module contains classes that make http requests to Facebook's graph API.
"""
from facebookads.exceptions import FacebookRequestError
from facebookads.session import FacebookSession
import json
import six
import collections
from six.moves import urllib
from six.moves import http_client
class FacebookResponse(object):
    """Encapsulates an http response from Facebook's Graph API."""

    def __init__(self, body=None, http_status=None, headers=None, call=None):
        """Initializes the object's internal data.

        Args:
            body (optional): The response body as text.
            http_status (optional): The http status code.
            headers (optional): The http headers.
            call (optional): The original call that was made.
        """
        self._body = body
        self._http_status = http_status
        self._headers = headers
        self._call = call

    def body(self):
        """Returns the response body."""
        return self._body

    def json(self):
        """Returns the response body -- in json if possible."""
        try:
            return json.loads(self._body)
        except (TypeError, ValueError):
            # Body was None or not valid JSON; hand back the raw text.
            return self._body

    def headers(self):
        """Return the response headers."""
        return self._headers

    def etag(self):
        """Returns the ETag header value if it exists."""
        if self._headers and 'ETag' in self._headers:
            # Bug fix: the lookup previously used 'Etag' (wrong case) and
            # raised KeyError whenever the header was actually present.
            return self._headers['ETag']
        else:
            return None

    def status(self):
        """Returns the http status code of the response."""
        return self._http_status

    def is_success(self):
        """Returns boolean indicating if the call was successful."""
        json_body = self.json()
        # json.loads() yields a plain dict for JSON objects; testing for
        # dict avoids collections.Mapping, which was removed in Python 3.10.
        if isinstance(json_body, dict) and 'error' in json_body:
            # Is a dictionary, has error in it
            return False
        elif bool(json_body):
            # Has body and no error
            return True
        elif self._http_status == http_client.NOT_MODIFIED:
            # ETAG Hit
            return True
        elif self._http_status == http_client.OK:
            # HTTP Okay
            return True
        else:
            # Something else
            return False

    def is_failure(self):
        """Returns boolean indicating if the call failed."""
        return not self.is_success()

    def error(self):
        """
        Returns a FacebookRequestError (located in the exceptions module) with
        an appropriate debug message.
        """
        if self.is_failure():
            return FacebookRequestError(
                "Call was not successful",
                self._call,
                self.status(),
                self.headers(),
                self.body()
            )
        else:
            return None
class FacebookAdsApi(object):
    """Encapsulates session attributes and methods to make API calls.

    Attributes:
        SDK_VERSION (class): indicating sdk version.
        HTTP_METHOD_GET (class): HTTP GET method name.
        HTTP_METHOD_POST (class): HTTP POST method name
        HTTP_METHOD_DELETE (class): HTTP DELETE method name
        HTTP_DEFAULT_HEADERS (class): Default HTTP headers for requests made by
            this sdk.
    """

    SDK_VERSION = '2.2.3'
    API_VERSION = 'v2.2'

    HTTP_METHOD_GET = 'GET'
    HTTP_METHOD_POST = 'POST'
    HTTP_METHOD_DELETE = 'DELETE'

    HTTP_DEFAULT_HEADERS = {
        'User-Agent': "fb-python-ads-api-sdk-%s" % SDK_VERSION,
    }

    # Process-wide defaults used by objects that are created without an
    # explicit api instance / account id.
    _default_api = None
    _default_account_id = None

    def __init__(self, session):
        """Initializes the api instance.

        Args:
            session: FacebookSession object that contains a requests interface
                and attribute GRAPH (the Facebook GRAPH API URL).
        """
        self._session = session
        self._num_requests_succeeded = 0
        self._num_requests_attempted = 0

    def get_num_requests_attempted(self):
        """Returns the number of calls attempted."""
        return self._num_requests_attempted

    def get_num_requests_succeeded(self):
        """Returns the number of calls that succeeded."""
        return self._num_requests_succeeded

    @classmethod
    def init(cls, app_id, app_secret, access_token, account_id=None):
        """Creates a session and installs it as the process-wide default api.

        Args:
            app_id: The Facebook application id.
            app_secret: The application secret.
            access_token: The access token used for all calls.
            account_id (optional): Default ad account id ('act_...').
        """
        session = FacebookSession(app_id, app_secret, access_token)
        api = cls(session)
        cls.set_default_api(api)
        if account_id is not None:
            cls.set_default_account_id(account_id)

    @classmethod
    def set_default_api(cls, api_instance):
        """Sets the default api instance.

        When making calls to the api, objects will revert to using the default
        api if one is not specified when initializing the objects.

        Args:
            api_instance: The instance which to set as default.
        """
        cls._default_api = api_instance

    @classmethod
    def get_default_api(cls):
        """Returns the default api instance."""
        return cls._default_api

    @classmethod
    def set_default_account_id(cls, account_id):
        """Sets the default ad account id.

        Raises:
            ValueError: if the id does not begin with 'act_'.
        """
        account_id = str(account_id)
        # Bug fix: the previous check (find('act_') == -1) accepted ids where
        # 'act_' appears anywhere (e.g. 'xact_1'), contradicting the error
        # message below; startswith() enforces the documented contract.
        if not account_id.startswith('act_'):
            raise ValueError(
                "Account ID provided in FacebookAdsApi.set_default_account_id "
                "expects a string that begins with 'act_'"
            )
        cls._default_account_id = account_id

    @classmethod
    def get_default_account_id(cls):
        """Returns the default ad account id, or None if never set."""
        return cls._default_account_id

    def call(self, method, path, params=None, headers=None, files=None):
        """Makes an API call.

        Args:
            method: The HTTP method name (e.g. 'GET').
            path: A tuple of path tokens or a full URL string. A tuple will
                be translated to a url as follows:
                    graph_url/tuple[0]/tuple[1]...
                It will be assumed that if the path is not a string, it will be
                iterable.
            params (optional): A mapping of request parameters where a key
                is the parameter name and its value is a string or an object
                which can be JSON-encoded.
            headers (optional): A mapping of request headers where a key is the
                header name and its value is the header value.
            files (optional): An optional mapping of file names to binary open
                file objects. These files will be attached to the request.

        Returns:
            A FacebookResponse object containing the response body, headers,
            http status, and summary of the call that was made.

        Raises:
            FacebookResponse.error() if the request failed.
        """
        if params is None:
            params = {}
        if headers is None:
            headers = {}
        if files is None:
            files = {}
        self._num_requests_attempted += 1
        if not isinstance(path, six.string_types):
            # Path is not a full path; build it from the graph root.
            path = "%s/%s/%s" % (
                self._session.GRAPH,
                self.API_VERSION,
                '/'.join(map(str, path)),
            )
        # Include api headers in http request (copy so the caller's mapping
        # is never mutated).
        headers = headers.copy()
        headers.update(FacebookAdsApi.HTTP_DEFAULT_HEADERS)
        if params:
            params = _top_level_param_json_encode(params)
        # Get request response and encapsulate it in a FacebookResponse.
        # GET/DELETE carry params in the query string; everything else in
        # the request body.
        if method == 'GET' or method == 'DELETE':
            response = self._session.requests.request(
                method,
                path,
                params=params,
                headers=headers,
                files=files,
            )
        else:
            response = self._session.requests.request(
                method,
                path,
                data=params,
                headers=headers,
                files=files,
            )
        fb_response = FacebookResponse(
            body=response.text,
            headers=response.headers,
            http_status=response.status_code,
            call={
                'method': method,
                'path': path,
                'params': params,
                'headers': headers,
                'files': files,
            },
        )
        if fb_response.is_failure():
            raise fb_response.error()
        self._num_requests_succeeded += 1
        return fb_response

    def new_batch(self):
        """
        Returns a new FacebookAdsApiBatch, which when executed will go through
        this api.
        """
        return FacebookAdsApiBatch(api=self)
class FacebookAdsApiBatch(object):
    """
    Exposes methods to build a sequence of calls which can be executed with
    a single http request.
    Note: Individual exceptions won't be thrown for each call that fails.
    The success and failure callback functions corresponding to a call
    should handle its success or failure.
    """
    def __init__(self, api):
        # Parallel lists: index i of each list describes the i-th queued call.
        self._api = api
        self._files = []
        self._batch = []
        self._success_callbacks = []
        self._failure_callbacks = []
    def __len__(self):
        # Number of calls currently queued in this batch.
        return len(self._batch)
    def add(
        self,
        method,
        relative_path,
        params=None,
        headers=None,
        files=None,
        success=None,
        failure=None,
    ):
        """Adds a call to the batch.
        Args:
            method: The HTTP method name (e.g. 'GET').
            relative_path: A tuple of path tokens or a relative URL string.
                A tuple will be translated to a url as follows:
                <graph url>/<tuple[0]>/<tuple[1]>...
                It will be assumed that if the path is not a string, it will be
                iterable.
            params (optional): A mapping of request parameters where a key
                is the parameter name and its value is a string or an object
                which can be JSON-encoded.
            headers (optional): A mapping of request headers where a key is the
                header name and its value is the header value.
            files (optional): An optional mapping of file names to binary open
                file objects. These files will be attached to the request.
            success (optional): A callback function which will be called with
                the FacebookResponse of this call if the call succeeded.
            failure (optional): A callback function which will be called with
                the FacebookResponse of this call if the call failed.
        Returns:
            A dictionary describing the call.
        """
        if not isinstance(relative_path, six.string_types):
            relative_url = '/'.join(relative_path)
        else:
            relative_url = relative_path
        call = {
            'method': method,
            'relative_url': relative_url,
        }
        if params:
            # Batch sub-requests carry their parameters url-encoded in the
            # 'body' field rather than as real query/form parameters.
            params = _top_level_param_json_encode(params)
            keyvals = []
            for key in params:
                keyvals.append("%s=%s" % (key, urllib.parse.quote(params[key])))
            call['body'] = '&'.join(keyvals)
        if files:
            # Files are attached to the outer request; sub-requests reference
            # them by name through 'attached_files'.
            call['attached_files'] = ','.join(files.keys())
        if headers:
            call['headers'] = []
            for header in headers:
                batch_formatted_header = {}
                batch_formatted_header['name'] = header
                batch_formatted_header['value'] = headers[header]
                call['headers'].append(batch_formatted_header)
        self._batch.append(call)
        self._files.append(files)
        self._success_callbacks.append(success)
        self._failure_callbacks.append(failure)
        return call
    def execute(self):
        """Makes a batch call to the api associated with this object.
        For each individual call response, calls the success or failure callback
        function if they were specified.
        Note: Does not explicitly raise exceptions. Individual exceptions won't
        be thrown for each call that fails. The success and failure callback
        functions corresponding to a call should handle its success or failure.
        Returns:
            If some of the calls have failed, returns a new FacebookAdsApiBatch
            object with those calls. Otherwise, returns None.
        """
        method = FacebookAdsApi.HTTP_METHOD_POST
        path = tuple()
        params = {'batch': self._batch}
        files = {}
        for call_files in self._files:
            if call_files:
                files.update(call_files)
        fb_response = self._api.call(
            method,
            path,
            params=params,
            files=files,
        )
        responses = fb_response.json()
        retry_indices = []
        for index, response in enumerate(responses):
            if response:
                if 'body' in response:
                    body = response['body']
                else:
                    body = None
                if 'code' in response:
                    code = response['code']
                else:
                    code = None
                if 'headers' in response:
                    headers = response['headers']
                else:
                    headers = None
                inner_fb_response = FacebookResponse(
                    body=body,
                    headers=headers,
                    http_status=code,
                    call=self._batch[index],
                )
                if inner_fb_response.is_success():
                    if self._success_callbacks[index] is not None:
                        self._success_callbacks[index](inner_fb_response)
                elif self._failure_callbacks[index] is not None:
                    self._failure_callbacks[index](inner_fb_response)
            else:
                # A null entry means the Graph API did not complete this
                # sub-request; queue it for retry.
                retry_indices.append(index)
        if retry_indices:
            # Build a fresh batch holding only the incomplete calls, keeping
            # each call aligned with its files and callbacks.
            new_batch = self.__class__(self._api)
            new_batch._files = [self._files[index] for index in retry_indices]
            new_batch._batch = [self._batch[index] for index in retry_indices]
            new_batch._success_callbacks = [self._success_callbacks[index]
                                            for index in retry_indices]
            new_batch._failure_callbacks = [self._failure_callbacks[index]
                                            for index in retry_indices]
            return new_batch
        else:
            return None
def _top_level_param_json_encode(params):
params = params.copy()
for param, value in params.items():
if (
isinstance(value, (collections.Mapping, collections.Sequence, bool))
and not isinstance(value, six.string_types)
):
params[param] = json.dumps(value)
else:
params[param] = value
return params
| apache-2.0 |
Yen-Chung-En/w16b_test | static/Brython3.1.3-20150514-095342/Lib/socket.py | 730 | 14913 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
    # Windows only: map Winsock (WSA*) error numbers to human-readable
    # messages, since strerror() does not cover them there.
    errorTab = {}
    errorTab[10004] = "The operation was interrupted."
    errorTab[10009] = "A bad file handle was passed."
    errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
    errorTab[10022] = "An invalid operation was attempted."
    errorTab[10035] = "The socket operation would block"
    errorTab[10036] = "A blocking operation is already in progress."
    errorTab[10048] = "The network address is in use."
    errorTab[10054] = "The connection has been reset."
    errorTab[10058] = "The network has been shut down."
    errorTab[10060] = "The operation timed out."
    errorTab[10061] = "Connection refused."
    errorTab[10063] = "The name is too long."
    errorTab[10064] = "The host is down."
    errorTab[10065] = "The host is unreachable."
    __all__.append("errorTab")
class socket(_socket.socket):

    """A subclass of _socket.socket adding the makefile() method."""

    # Keep instances lightweight; _io_refs counts live makefile() streams.
    __slots__ = ["__weakref__", "_io_refs", "_closed"]

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
        _socket.socket.__init__(self, family, type, proto, fileno)
        # Number of SocketIO objects handed out by makefile() that are still
        # open; the real close is deferred until this drops to zero.
        self._io_refs = 0
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, *args):
        if not self._closed:
            self.close()

    def __repr__(self):
        """Wrap __repr__() to reveal the real class name."""
        s = _socket.socket.__repr__(self)
        if s.startswith("<socket object"):
            s = "<%s.%s%s%s" % (self.__class__.__module__,
                                self.__class__.__name__,
                                getattr(self, '_closed', False) and " [closed] " or "",
                                s[7:])
        return s

    def __getstate__(self):
        raise TypeError("Cannot serialize socket object")

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock

    def accept(self):
        """accept() -> (socket object, address info)

        Wait for an incoming connection.  Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr

    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename,
        except the only mode characters supported are 'r', 'w' and 'b'.
        The semantics are similar too.  (XXX refactor to share code?)
        """
        for c in mode:
            if c not in {"r", "w", "b"}:
                # Bug fix: the message was never %-formatted, so the caller
                # saw a literal '%r' instead of the offending mode string.
                raise ValueError("invalid mode %r (only r, w, b allowed)" % mode)
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text

    def _decref_socketios(self):
        # Called by SocketIO.close(); performs the deferred close once the
        # last stream is gone.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()

    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)

    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()

    def detach(self):
        """detach() -> file descriptor

        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes.  The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object

    Duplicate the file descriptor *fd* and wrap the duplicate in a new
    socket object; the remaining arguments are interpreted as for socket().
    The original descriptor is left untouched.
    """
    return socket(family, type, proto, dup(fd))
if hasattr(_socket.socket, "share"):
    # Windows only: _socket.socket.share() exists solely on platforms with
    # WSADuplicateSocket support, so fromshare is defined conditionally.
    def fromshare(info):
        """ fromshare(info) -> socket object

        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        return socket(0, 0, 0, info)
if hasattr(_socket, "socketpair"):
    # Only defined where the C layer provides socketpair() (i.e. not on
    # older Windows builds).
    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                # AF_UNIX is not defined on this platform; fall back to IP.
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the raw _socket objects in this module's socket class,
        # stealing their file descriptors via detach().
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
    """Raw I/O implementation for stream sockets.
    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """
    # One might wonder why not let FileIO do the job instead.  There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't used read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)
    # XXX More docs
    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        # Socket I/O is always binary at this layer; normalise the mode.
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        # Once a timeout fires the stream is considered poisoned; further
        # reads raise instead of returning partial data.
        self._timeout_occurred = False
    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.
        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        if self._timeout_occurred:
            raise IOError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                self._timeout_occurred = True
                raise
            except InterruptedError:
                # EINTR: retry the recv instead of surfacing the signal.
                continue
            except error as e:
                if e.args[0] in _blocking_errnos:
                    # Non-blocking socket with nothing available.
                    return None
                raise
    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.args[0] in _blocking_errnos:
                return None
            raise
    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading
    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing
    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        # Sockets are never seekable; defer to RawIOBase, which says False.
        return super().seekable()
    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()
    @property
    def name(self):
        if not self.closed:
            return self.fileno()
        else:
            return -1
    @property
    def mode(self):
        return self._mode
    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Tell the owning socket one fewer stream is alive; it performs the
        # real close when its refcount hits zero.
        self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Return a fully qualified domain name for *name*.

    An empty argument (or '0.0.0.0') is interpreted as the local host.
    The hostname reported by gethostbyaddr() is checked first, then any
    aliases; when no dotted name is found, the plain hostname is returned.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # Lookup failed; fall back to whatever we started with.
        return name
    # Prefer the first dotted (qualified) candidate, hostname first.
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.
    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    # Remember the last connection error so it can be re-raised if every
    # resolved address fails.
    err = None
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            # The sentinel distinguishes "no timeout given" from an explicit
            # timeout of None (blocking mode).
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error as _:
            err = _
            # Clean up the failed attempt before trying the next address.
            if sock is not None:
                sock.close()
    if err is not None:
        raise err
    else:
        raise error("getaddrinfo returns an empty list")
| agpl-3.0 |
oskar456/youtube-dl | youtube_dl/extractor/redbulltv.py | 21 | 4944 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
float_or_none,
int_or_none,
try_get,
# unified_timestamp,
ExtractorError,
)
class RedBullTVIE(InfoExtractor):
    # Matches films, episodes and live segments; segment URLs embed a second
    # AP- id, which is the one captured as the video id.
    _VALID_URL = r'https?://(?:www\.)?redbull\.tv/(?:video|film|live)/(?:AP-\w+/segment/)?(?P<id>AP-\w+)'
    _TESTS = [{
        # film
        'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc',
        'md5': 'fb0445b98aa4394e504b413d98031d1f',
        'info_dict': {
            'id': 'AP-1Q756YYX51W11',
            'ext': 'mp4',
            'title': 'ABC of...WRC',
            'description': 'md5:5c7ed8f4015c8492ecf64b6ab31e7d31',
            'duration': 1582.04,
            # 'timestamp': 1488405786,
            # 'upload_date': '20170301',
        },
    }, {
        # episode
        'url': 'https://www.redbull.tv/video/AP-1PMT5JCWH1W11/grime?playlist=shows:shows-playall:web',
        'info_dict': {
            'id': 'AP-1PMT5JCWH1W11',
            'ext': 'mp4',
            'title': 'Grime - Hashtags S2 E4',
            'description': 'md5:334b741c8c1ce65be057eab6773c1cf5',
            'duration': 904.6,
            # 'timestamp': 1487290093,
            # 'upload_date': '20170217',
            'series': 'Hashtags',
            'season_number': 2,
            'episode_number': 4,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # segment
        'url': 'https://www.redbull.tv/live/AP-1R5DX49XS1W11/segment/AP-1QSAQJ6V52111/semi-finals',
        'info_dict': {
            'id': 'AP-1QSAQJ6V52111',
            'ext': 'mp4',
            'title': 'Semi Finals - Vans Park Series Pro Tour',
            'description': 'md5:306a2783cdafa9e65e39aa62f514fd97',
            'duration': 11791.991,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.redbull.tv/film/AP-1MSKKF5T92111/in-motion',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Fetch an anonymous session token first; the content API rejects
        # unauthenticated requests.
        session = self._download_json(
            'https://api-v2.redbull.tv/session', video_id,
            note='Downloading access token', query={
                'build': '4.370.0',
                'category': 'personal_computer',
                'os_version': '1.0',
                'os_family': 'http',
            })
        if session.get('code') == 'error':
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, session['message']))
        auth = '%s %s' % (session.get('token_type', 'Bearer'), session['access_token'])
        try:
            info = self._download_json(
                'https://api-v2.redbull.tv/content/%s' % video_id,
                video_id, note='Downloading video information',
                headers={'Authorization': auth}
            )
        except ExtractorError as e:
            # A 404 carries a JSON body whose 'message' explains the failure
            # (e.g. geo-blocked or expired content); surface it verbatim.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                error_message = self._parse_json(
                    e.cause.read().decode(), video_id)['message']
                raise ExtractorError('%s said: %s' % (
                    self.IE_NAME, error_message), expected=True)
            raise
        video = info['video_product']
        title = info['title'].strip()
        formats = self._extract_m3u8_formats(
            video['url'], video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
        self._sort_formats(formats)
        # attachments.captions maps (ignored) keys to per-language caption
        # lists; each caption entry supplies a url and a format.
        subtitles = {}
        for _, captions in (try_get(
                video, lambda x: x['attachments']['captions'],
                dict) or {}).items():
            if not captions or not isinstance(captions, list):
                continue
            for caption in captions:
                caption_url = caption.get('url')
                if not caption_url:
                    continue
                ext = caption.get('format')
                if ext == 'xml':
                    # The API labels TTML captions as generic 'xml'.
                    ext = 'ttml'
                subtitles.setdefault(caption.get('lang') or 'en', []).append({
                    'url': caption_url,
                    'ext': ext,
                })
        # Episodes carry e.g. 'Hashtags S2 E4' in the subheading; append it
        # to form the full display title.
        subheading = info.get('subheading')
        if subheading:
            title += ' - %s' % subheading
        return {
            'id': video_id,
            'title': title,
            'description': info.get('long_description') or info.get(
                'short_description'),
            'duration': float_or_none(video.get('duration'), scale=1000),
            # 'timestamp': unified_timestamp(info.get('published')),
            'series': info.get('show_title'),
            'season_number': int_or_none(info.get('season_number')),
            'episode_number': int_or_none(info.get('episode_number')),
            'formats': formats,
            'subtitles': subtitles,
        }
| unlicense |
jefflyn/buddha | src/mlia/Ch04/bayes.py | 3 | 7076 | '''
Created on Oct 19, 2010
@author: Peter
'''
from numpy import *
def loadDataSet():
    """Return the toy corpus: a list of tokenised posts and parallel labels."""
    posts = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    labels = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return posts, labels
def createVocabList(dataSet):
    """Return a list of the unique tokens appearing anywhere in *dataSet*."""
    vocab = set()
    for document in dataSet:
        # update() folds each document's tokens into the running union.
        vocab.update(document)
    return list(vocab)
def setOfWords2Vec(vocabList, inputSet):
    """Convert *inputSet* into a 0/1 presence vector over *vocabList*.

    Args:
        vocabList: ordered list of known tokens.
        inputSet: iterable of tokens from one document.

    Returns:
        A list the same length as vocabList, with 1 at the position of every
        token present in inputSet and 0 elsewhere. Unknown tokens are
        reported on stdout but otherwise ignored.
    """
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        # Bug fix: print as a function call keeps this module importable on
        # Python 3 (the Python-2 print statement is a syntax error there).
        else: print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
def trainNB0(trainMatrix, trainCategory):
    """Train the naive Bayes model.

    Returns (log p(word|class0), log p(word|class1), p(class1)).  Counts are
    Laplace-smoothed (numerators start at 1, denominators at 2) and results
    are kept in the log domain to avoid floating-point underflow.
    """
    numDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numDocs)
    p0Num, p1Num = ones(numWords), ones(numWords)
    p0Denom, p1Denom = 2.0, 2.0
    for docVec, label in zip(trainMatrix, trainCategory):
        if label == 1:
            p1Num += docVec
            p1Denom += sum(docVec)
        else:
            p0Num += docVec
            p0Denom += sum(docVec)
    return log(p0Num / p0Denom), log(p1Num / p1Denom), pAbusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Return 1 if vec2Classify is more likely class 1, else 0.

    Compares unnormalized log posteriors: the element-wise product of
    the word vector with each class's log-probability vector, summed,
    plus the log prior of that class.
    """
    logPosterior1 = sum(vec2Classify * p1Vec) + log(pClass1)
    logPosterior0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    return 1 if logPosterior1 > logPosterior0 else 0
def bagOfWords2VecMN(vocabList, inputSet):
    """Bag-of-words model: count occurrences of each vocab word in inputSet."""
    counts = [0] * len(vocabList)
    for token in inputSet:
        try:
            counts[vocabList.index(token)] += 1
        except ValueError:
            pass  # out-of-vocabulary tokens are silently ignored
    return counts
def testingNB():
    # End-to-end demo: train on the toy dataset, then classify two sample posts.
    listOPosts,listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat=[]
    # convert every post into a set-of-words vector over the vocabulary
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
def textParse(bigString):    #input is big string, #output is word list
    """Split a raw string into lowercase tokens longer than two characters.

    Splits on runs of one or more non-word characters.  The original
    pattern used '*' instead of '+', which also matches the empty string;
    on Python >= 3.7 re.split honors empty matches and would split the
    text between every pair of characters.
    """
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
def spamTest():
    # Hold-out validation of the spam classifier on 25 spam + 25 ham emails
    # read from email/spam/*.txt and email/ham/*.txt.
    docList=[]; classList = []; fullText =[]
    for i in range(1,26):
        # spam messages are labelled 1, ham messages 0
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)#create vocabulary
    # NOTE(review): range(50) is a list on Python 2 only; the del below
    # would fail on a Python 3 range object.
    trainingSet = range(50); testSet=[]           #create test set
    for i in range(10):
        # randomly move 10 of the 50 documents into the test set
        # (random here is numpy.random via the star import)
        randIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat=[]; trainClasses = []
    for docIndex in trainingSet:#train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
    errorCount = 0
    for docIndex in testSet:        #classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
            errorCount += 1
            print "classification error",docList[docIndex]
    print 'the error rate is: ',float(errorCount)/len(testSet)
    #return vocabList,fullText
def calcMostFreq(vocabList,fullText):
    """Return the 30 most frequent vocabulary tokens as (token, count) pairs.

    Counts all of fullText in a single pass with collections.Counter
    instead of calling fullText.count() once per vocabulary word (which
    was O(len(vocabList) * len(fullText))).  Also avoids the Python 2
    only dict.iteritems(); this version runs on Python 2 and 3.
    """
    from collections import Counter
    occurrences = Counter(fullText)  # missing tokens count as 0
    freqPairs = [(token, occurrences[token]) for token in vocabList]
    freqPairs.sort(key=lambda pair: pair[1], reverse=True)
    return freqPairs[:30]
def localWords(feed1,feed0):
    # Train/test a classifier separating the entries of two RSS feeds
    # (feed1 -> class 1, feed0 -> class 0); mirrors spamTest() above.
    # Returns the vocabulary and both log-probability vectors.
    import feedparser
    docList=[]; classList = []; fullText =[]
    minLen = min(len(feed1['entries']),len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1) #NY is class 1
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)#create vocabulary
    top30Words = calcMostFreq(vocabList,fullText) #remove top 30 words
    # the most frequent words are mostly stop words and hurt accuracy
    for pairW in top30Words:
        if pairW[0] in vocabList: vocabList.remove(pairW[0])
    # NOTE(review): range(...) is only deletable as a list on Python 2.
    trainingSet = range(2*minLen); testSet=[]           #create test set
    for i in range(20):
        # randomly move 20 documents into the test set
        randIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat=[]; trainClasses = []
    for docIndex in trainingSet:#train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
    errorCount = 0
    for docIndex in testSet:        #classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ',float(errorCount)/len(testSet)
    return vocabList,p0V,p1V
def getTopWords(ny,sf):
    # Print the words most indicative of each feed: those whose log
    # probability exceeds -6.0, sorted from most to least probable.
    import operator  # NOTE(review): this import is unused here
    vocabList,p0V,p1V=localWords(ny,sf)
    topNY=[]; topSF=[]
    for i in range(len(p0V)):
        if p0V[i] > -6.0 : topSF.append((vocabList[i],p0V[i]))
        if p1V[i] > -6.0 : topNY.append((vocabList[i],p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print "SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**"
    for item in sortedSF:
        print item[0]
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print "NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**"
    for item in sortedNY:
        print item[0]
| artistic-2.0 |
mahak/keystone | keystone/tests/unit/ksfixtures/backendloader.py | 2 | 1383 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
from keystone import auth
import keystone.server
class BackendLoader(fixtures.Fixture):
    """Fixture that loads keystone backends onto a test case.

    Every driver returned by ``keystone.server.setup_backends`` is
    attached to the test case as an attribute of the same name, and a
    cleanup is registered to tear them down again.
    """

    def __init__(self, testcase):
        super(BackendLoader, self).__init__()
        self._testcase = testcase

    def setUp(self):
        super(BackendLoader, self).setUp()
        # Start from a clean auth plugin registry before loading backends.
        self.clear_auth_plugin_registry()
        drivers, _ = keystone.server.setup_backends()
        for name, manager in drivers.items():
            setattr(self._testcase, name, manager)
        names = list(drivers.keys())
        self.addCleanup(self._testcase.cleanup_instance(*names))
        del self._testcase  # break circular reference

    def clear_auth_plugin_registry(self):
        # Reset module-level auth plugin state so each test loads fresh.
        auth.core.AUTH_METHODS.clear()
        auth.core.AUTH_PLUGINS_LOADED = False
| apache-2.0 |
onlinecity/phone-iso3166 | get_e212_wiki.py | 1 | 1904 | from bs4 import BeautifulSoup
from collections import defaultdict
import requests
from phone_iso3166.country import network_country
from phone_iso3166.errors import InvalidNetwork
from io import StringIO
from pprint import pformat
names_out_file = 'phone_iso3166/e212_names.py'

# Scrape the MCC/MNC allocation tables from Wikipedia.
html = requests.get('https://en.wikipedia.org/wiki/Mobile_country_code')
soup = BeautifulSoup(html.content, 'html.parser')

# mcc -> mnc -> (ISO 3166-1 alpha-2 country code, operator name)
networkdict = defaultdict(lambda: defaultdict(tuple))
for row in soup.find_all('table', {'class': 'wikitable'}):
    rows = row.find_all('td')
    # Chunk the flat <td> list into logical rows.
    # Assumes 7 cells per row (MCC, MNC, brand, operator, status, ...) --
    # TODO confirm against the current page layout.
    mnc_info = [rows[x:x + 7] for x in range(0, len(rows), 7)]
    for info in mnc_info:
        try:
            mcc = int(info[0].text)
            mnc = int(info[1].text)
            operator_name = info[3].text
            country_code = network_country(mcc, mnc)
            if info[4].text != 'Not operational':
                networkdict[mcc][mnc] = (country_code, operator_name)
        except (ValueError, InvalidNetwork):
            # Skip empty tables and header/footer rows that don't parse.
            pass

# Turn in to regular dict so it can be written.
operators = {k: dict(v) for k, v in networkdict.items()}

# Collect all MNC/MCC pairs under country code.
countriesdict = defaultdict(list)
for mcc, mncs in operators.items():
    for mnc, c in mncs.items():
        countriesdict[c[0]].append((mcc, mnc))
countriesdict = dict(countriesdict)

# Render both mappings as an importable Python module.
# (Removed an unused `e212_complementary = StringIO()` buffer here.)
oprmap = StringIO()
oprmap.write('#!/usr/bin/env python\n')
oprmap.write('# -*- coding: utf-8 -*-\n\n')
oprmap.write('# Generated by get_e212.py\n')
oprmap.write('# Based on https://en.wikipedia.org/wiki/Mobile_country_code\n')
oprmap.write('operators = \\\n')
oprmap.write(pformat(operators))
oprmap.write('\n\n\ncountries = \\\n')
oprmap.write(pformat(countriesdict))

with open(names_out_file, 'w') as f:
    f.write(oprmap.getvalue() + '\n')
print('Wrote ' + names_out_file)
| mit |
meles5/qutebrowser | tests/unit/mainwindow/statusbar/test_url.py | 6 | 5698 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016 Clayton Craft (craftyguy) <craftyguy@gmail.com>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test Statusbar url."""
import pytest
from qutebrowser.utils import usertypes
from qutebrowser.mainwindow.statusbar import url
from PyQt5.QtCore import QUrl
@pytest.fixture
def url_widget(qtbot, monkeypatch, config_stub):
    """Fixture providing a Url widget."""
    color_config = {
        'statusbar.url.bg': 'white',
        'statusbar.url.fg': 'black',
        'statusbar.url.fg.success': 'yellow',
        'statusbar.url.fg.success.https': 'green',
        'statusbar.url.fg.error': 'red',
        'statusbar.url.fg.warn': 'orange',
        'statusbar.url.fg.hover': 'blue',
    }
    config_stub.data = {'colors': color_config, 'fonts': {}}
    # Make the url module's style helper read from the stub config.
    monkeypatch.setattr(
        'qutebrowser.mainwindow.statusbar.url.style.config', config_stub)
    widget = url.UrlText()
    qtbot.add_widget(widget)
    assert not widget.isVisible()  # the widget starts out hidden
    return widget
@pytest.mark.parametrize('qurl', [
    QUrl('http://abc123.com/this/awesome/url.html'),
    QUrl('https://supersecret.gov/nsa/files.txt'),
    None
])
def test_set_url(url_widget, qurl):
    """Test text displayed by the widget."""
    url_widget.set_url(qurl)
    # A None url clears the text; otherwise the display string is shown.
    expected = '' if qurl is None else qurl.toDisplayString()
    assert url_widget.text() == expected
@pytest.mark.parametrize('link', [
    'http://abc123.com/this/awesome/url.html',
    'https://supersecret.gov/nsa/files.txt',
    None,
])
def test_set_hover_url(url_widget, link):
    """Test text when hovering over a link."""
    url_widget.set_hover_url(link)
    if link is None:
        # Clearing the hover url resets the widget to normal state.
        assert url_widget.text() == ''
        assert url_widget._urltype == url.UrlType.normal
    else:
        assert url_widget.text() == link
        assert url_widget._urltype == url.UrlType.hover
@pytest.mark.parametrize('link, expected', [
    ('http://test.gr/%CE%B1%CE%B2%CE%B3%CE%B4.txt', 'http://test.gr/αβγδ.txt'),
    ('http://test.ru/%D0%B0%D0%B1%D0%B2%D0%B3.txt', 'http://test.ru/абвг.txt'),
    ('http://test.com/s%20p%20a%20c%20e.txt', 'http://test.com/s p a c e.txt'),
    ('http://test.com/%22quotes%22.html', 'http://test.com/%22quotes%22.html'),
    ('http://username:secret%20password@test.com', 'http://username@test.com'),
    ('http://example.com%5b/', 'http://example.com%5b/'),  # invalid url
])
def test_set_hover_url_encoded(url_widget, link, expected):
    """Test text when hovering over a percent encoded link."""
    url_widget.set_hover_url(link)
    # The widget decodes safe sequences but keeps ambiguous ones encoded.
    assert url_widget._urltype == url.UrlType.hover
    assert url_widget.text() == expected
@pytest.mark.parametrize('status, expected_type', [
    (usertypes.LoadStatus.success, url.UrlType.success),
    (usertypes.LoadStatus.success_https, url.UrlType.success_https),
    (usertypes.LoadStatus.error, url.UrlType.error),
    (usertypes.LoadStatus.warn, url.UrlType.warn),
    (usertypes.LoadStatus.loading, url.UrlType.normal),
    (usertypes.LoadStatus.none, url.UrlType.normal)
])
def test_on_load_status_changed(url_widget, status, expected_type):
    """Test text when status is changed."""
    url_widget.set_url(QUrl('www.example.com'))
    # The slot receives the status name as a string, not the enum itself.
    url_widget.on_load_status_changed(status.name)
    assert url_widget._urltype == expected_type
@pytest.mark.parametrize('load_status, qurl', [
    (url.UrlType.success, QUrl('http://abc123.com/this/awesome/url.html')),
    (url.UrlType.success, QUrl('http://reddit.com/r/linux')),
    (url.UrlType.success_https, QUrl('www.google.com')),
    (url.UrlType.success_https, QUrl('https://supersecret.gov/nsa/files.txt')),
    (url.UrlType.warn, QUrl('www.shadysite.org/some/file/with/issues.htm')),
    (url.UrlType.error, QUrl('invalid::/url')),
])
def test_on_tab_changed(url_widget, fake_web_tab, load_status, qurl):
    """The widget adopts the url and load status of the selected tab."""
    tab = fake_web_tab(load_status=load_status, url=qurl)
    url_widget.on_tab_changed(tab)
    assert url_widget.text() == qurl.toDisplayString()
    assert url_widget._urltype == load_status
@pytest.mark.parametrize('qurl, load_status, expected_status', [
    (QUrl('http://abc123.com/this/awesome/url.html'),
     usertypes.LoadStatus.success, url.UrlType.success),
    (QUrl('https://supersecret.gov/nsa/files.txt'),
     usertypes.LoadStatus.success_https, url.UrlType.success_https),
    (QUrl('http://www.qutebrowser.org/CONTRIBUTING.html'),
     usertypes.LoadStatus.loading, url.UrlType.normal),
    (QUrl('www.whatisthisurl.com'),
     usertypes.LoadStatus.warn, url.UrlType.warn),
])
def test_normal_url(url_widget, qurl, load_status, expected_status):
    """The widget falls back to the page url after hovering ends."""
    displayed = qurl.toDisplayString()
    url_widget.set_url(qurl)
    url_widget.on_load_status_changed(load_status.name)
    # Hover over a link, then stop hovering: the page url must come back.
    url_widget.set_hover_url(displayed)
    url_widget.set_hover_url("")
    assert url_widget._urltype == expected_status
    assert url_widget.text() == displayed
wskplho/sl4a | python/src/Lib/plat-irix5/jpeg.py | 66 | 3675 | # Implement 'jpeg' interface using SGI's compression library
# XXX Options 'smooth' and 'optimize' are ignored.
# XXX It appears that compressing grayscale images doesn't work right;
# XXX the resulting file causes weirdness.
from warnings import warnpy3k
# This module was removed in Python 3; warn anyone still importing it.
warnpy3k("the jpeg module has been removed in Python 3.0", stacklevel=2)
del warnpy3k

class error(Exception):
    # Module-specific exception type (lowercase name per old stdlib convention).
    pass

# Default codec options, adjustable through setoption() below.
options = {'quality': 75, 'optimize': 0, 'smooth': 0, 'forcegray': 0}

comp = None    # lazily-created cl compressor, shared across compress() calls
decomp = None  # lazily-created cl decompressor, shared across decompress() calls
def compress(imgdata, width, height, bytesperpixel):
    """Encode raw pixel data as JPEG via SGI's cl compression library.

    bytesperpixel selects the source format: 1 = grayscale, 4 = RGBX.
    """
    global comp
    import cl
    # Create the shared compressor on first use.
    if comp is None:
        comp = cl.OpenCompressor(cl.JPEG)
    if bytesperpixel == 1:
        source_format = cl.GRAYSCALE
    elif bytesperpixel == 4:
        source_format = cl.RGBX
    if options['forcegray']:
        internal_format = cl.GRAYSCALE
    else:
        internal_format = cl.YUV
    # XXX How to support 'optimize'?
    params = [
        cl.IMAGE_WIDTH, width, cl.IMAGE_HEIGHT, height,
        cl.ORIGINAL_FORMAT, source_format,
        cl.ORIENTATION, cl.BOTTOM_UP,
        cl.QUALITY_FACTOR, options['quality'],
        cl.INTERNAL_FORMAT, internal_format,
    ]
    comp.SetParams(params)
    return comp.Compress(1, imgdata)
def decompress(jpegdata):
    """Decode a JPEG string; return (imgdata, width, height, bytesperpixel)."""
    global decomp
    import cl
    # Create the shared decompressor on first use.
    if decomp is None:
        decomp = cl.OpenDecompressor(cl.JPEG)
    headersize = decomp.ReadHeader(jpegdata)
    # Query the image geometry and stored format from the header.
    params = [cl.IMAGE_WIDTH, 0, cl.IMAGE_HEIGHT, 0, cl.INTERNAL_FORMAT, 0]
    decomp.GetParams(params)
    width, height, stored_format = params[1], params[3], params[5]
    if stored_format == cl.GRAYSCALE or options['forcegray']:
        target_format = cl.GRAYSCALE
        bytesperpixel = 1
    else:
        target_format = cl.RGBX
        bytesperpixel = 4
    # XXX How to support 'smooth'?
    params = [
        cl.ORIGINAL_FORMAT, target_format,
        cl.ORIENTATION, cl.BOTTOM_UP,
        cl.FRAME_BUFFER_SIZE, width * height * bytesperpixel,
    ]
    decomp.SetParams(params)
    imgdata = decomp.Decompress(1, jpegdata)
    return imgdata, width, height, bytesperpixel
def setoption(name, value):
    """Set a module-wide codec option (see `options` for names/defaults).

    Accepts the British spelling 'forcegrey' as an alias for 'forcegray'.
    Raises TypeError for non-integer values and KeyError for unknown
    option names.
    """
    # Uses raise-with-call and the `in` operator, valid on Python 2 and 3,
    # instead of the Python-2-only `raise TypeError, msg` / dict.has_key().
    if type(value) is not type(0):
        raise TypeError('jpeg.setoption: numeric options only')
    if name == 'forcegrey':
        name = 'forcegray'
    if name not in options:
        raise KeyError('jpeg.setoption: unknown option name')
    options[name] = int(value)
def test():
    """Command-line harness: show each file named on the command line.

    A leading -g flag forces grayscale; with no arguments a default
    sample image is displayed.
    """
    import sys
    args = sys.argv  # alias: mutations below intentionally edit sys.argv
    if args[1:2] == ['-g']:
        del args[1]
        setoption('forcegray', 1)
    if not args[1:]:
        args.append('/usr/local/images/data/jpg/asterix.jpg')
    for filename in args[1:]:
        show(filename)
def show(file):
    # Decompress `file` and display it in an IRIX GL window; on exit,
    # re-compress the decoded image to /tmp/j.jpg as a round-trip check.
    import gl, GL, DEVICE
    jpegdata = open(file, 'r').read()
    imgdata, width, height, bytesperpixel = decompress(jpegdata)
    gl.foreground()
    gl.prefsize(width, height)
    win = gl.winopen(file)
    if bytesperpixel == 1:
        # grayscale: colormap mode with an identity gray ramp
        gl.cmode()
        gl.pixmode(GL.PM_SIZE, 8)
        gl.gconfig()
        for i in range(256):
            gl.mapcolor(i, i, i, i)
    else:
        # color: direct RGB mode with 32-bit pixels
        gl.RGBmode()
        gl.pixmode(GL.PM_SIZE, 32)
        gl.gconfig()
    # subscribe to the events the loop below handles
    gl.qdevice(DEVICE.REDRAW)
    gl.qdevice(DEVICE.ESCKEY)
    gl.qdevice(DEVICE.WINQUIT)
    gl.qdevice(DEVICE.WINSHUT)
    gl.lrectwrite(0, 0, width-1, height-1, imgdata)
    # event loop: repaint on REDRAW, exit on escape/quit/close
    while 1:
        dev, val = gl.qread()
        if dev in (DEVICE.ESCKEY, DEVICE.WINSHUT, DEVICE.WINQUIT):
            break
        if dev == DEVICE.REDRAW:
            gl.lrectwrite(0, 0, width-1, height-1, imgdata)
    gl.winclose(win)
    # Now test the compression and write the result to a fixed filename
    newjpegdata = compress(imgdata, width, height, bytesperpixel)
    open('/tmp/j.jpg', 'w').write(newjpegdata)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.