import re
from st2actions.runners.pythonrunner import Action
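# Words made up entirely of hexadecimal characters; combined with the length check
# in run() below this picks out hash-like tokens (e.g. an MD5 digest is 32 hex
# characters, a SHA-256 digest is 64).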
REGEX_PATTERN = '^([0-9A-Fa-f]+)$'
class ExtractAction(Action):
def run(self, text):
words = [word for word in text.split(' ') if len(word) >= 32]
for word in words:
if re.search(REGEX_PATTERN, word):
return word
return None
|
'''
import examplemod as mod # Here we have to use the mod.function() format
mod.do_a_thing()
'''
from mod_dir.examplemod import do_a_thing, do_another_thing # Here we can call the imported functions directly
# the examplemod.py file lives in the mod_dir directory --> this is basically working with modules
do_a_thing()
do_another_thing()
|
# Copyright 2021 ZBW – Leibniz Information Centre for Economics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.fixture
def X():
return ['txt0', 'txt1']
|
import unittest
import numpy as np
import pandas as pd
import datetime
import collections
from dama.data.it import Iterator, BatchIterator, Slice
from dama.data.ds import Data
from dama.connexions.core import GroupManager
from dama.fmtypes import DEFAUL_GROUP_NAME
from dama.utils.core import Chunks
from dama.utils.seq import grouper_chunk
from dama.connexions.core import ListConn
import numbers
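# Infinite integer generator used to exercise iterators whose length is unknown (np.inf).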
def stream():
i = 0
while True:
yield i
i += 1
class TestIteratorIter(unittest.TestCase):
def test_iteration(self):
array = np.arange(0, 10)
it = Iterator(array)
for i, e in enumerate(it):
self.assertEqual(e, i)
def test_iteration_dtype(self):
array = [1, 2, 3, 4.0, 'xxx', 1, 3, 4, 5]
it = Iterator(array, dtypes=np.dtype([("x0", np.dtype("float"))]))
self.assertEqual(it.groups, ("x0",))
self.assertEqual(it.dtype, np.dtype("float"))
for a, e in zip(array, it):
self.assertEqual(a, e)
def test_iteration_dtype2(self):
array = [1, 2, 3, 4.0, 'xxx', [1], [[2, 3]]]
it = Iterator(array, dtypes=np.dtype([(DEFAUL_GROUP_NAME, np.dtype("float"))]))
for a, e in zip(array, it):
self.assertEqual(a, e)
def test_nshape(self):
array = np.zeros((20, 2)) + 1
it = Iterator(array)
self.assertEqual(it.shape, (20, 2))
self.assertEqual(it[:10].shape, (10, 2))
def test_shape(self):
data = np.random.rand(10, 3)
it = Iterator(data)
self.assertEqual(it.shape, (10, 3))
data = np.random.rand(10)
it = Iterator(data)
self.assertEqual(it.shape, (10,))
data = np.random.rand(10, 3, 3)
it = Iterator(data)
self.assertEqual(it.shape, (10, 3, 3))
def test_iterator_cut(self):
array = np.arange(0, 100)
it = Iterator(array)
list_ = []
for e in it[:10]:
list_.append(e)
self.assertEqual((list_ == array[:10]).all(), True)
def test_flat_all(self):
array = np.empty((20, 2), dtype=np.dtype(int))
array[:, 0] = np.arange(0, 20)
array[:, 1] = np.arange(0, 20) + 2
it = Iterator(array)
flat_array = array.reshape(-1)
for i, e in enumerate(it.flat()):
self.assertEqual(e, flat_array[i])
def test_it_attrs(self):
it = Iterator(stream())
self.assertEqual(it.dtype, int)
self.assertEqual(it.dtypes, [(DEFAUL_GROUP_NAME, np.dtype('int64'))])
self.assertEqual(it.length, np.inf)
self.assertEqual(it.shape, (np.inf,))
self.assertEqual(it.num_splits(), np.inf)
self.assertEqual(it.type_elem, numbers.Number)
self.assertEqual(it.groups, (DEFAUL_GROUP_NAME,))
def test_it_attrs_length(self):
it = Iterator(stream())[:10]
self.assertEqual(it.dtype, int)
self.assertEqual(it.dtypes, [(DEFAUL_GROUP_NAME, np.dtype('int64'))])
self.assertEqual(it.length, 10)
self.assertEqual(it.shape, (10,))
self.assertEqual(it.num_splits(), 10)
self.assertEqual(it.type_elem, numbers.Number)
self.assertEqual(it.groups, (DEFAUL_GROUP_NAME,))
def test_sample(self):
order = (i for i in range(20))
array = np.arange(20)
it = Iterator(order)
samples = []
samples_it = it.sample(5)
self.assertEqual(isinstance(samples_it, Iterator), True)
for e in samples_it:
samples.append(e)
self.assertEqual((samples == array[:5]).all(), False)
def test_gen_weights(self):
order = (i for i in range(4))
it = Iterator(order)
it_0 = it.weights_gen(it, None, lambda x: x % 2 + 1)
self.assertCountEqual(list(it_0), [(0, 1), (1, 2), (2, 1), (3, 2)])
def fn(v):
if v == 0:
return 1
else:
return 99
data = np.zeros((20, 4)) + [1, 2, 3, 0]
data[:, 3] = np.random.rand(1, 20) > .5
it = Iterator(data)
w_v = list(it.weights_gen(it, 3, fn))
self.assertEqual(w_v[0][1], fn(data[0][3]))
def test_groups(self):
data = np.asarray([[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype='int')
data = pd.DataFrame(data, columns=['x', 'y'])
it = Iterator(data)
self.assertCountEqual(it.groups, ['x', 'y'])
def test_sliding_window(self):
it = Iterator(range(100))
i = 1
j = 2
for e in it.window():
self.assertCountEqual(e, [i, j])
i += 1
j += 1
def test_shape_list(self):
def _it():
for x in range(100):
e = np.random.rand(3, 3)
yield (x, e, [1, 2])
it = Iterator(_it(), dtypes=np.dtype([("x", np.dtype("float")), ("y", np.dtype("float")),
("z", np.dtype("int"))]))
self.assertEqual(it.shape["x"], (np.inf, ))
self.assertEqual(it.shape["y"], (np.inf, 3, 3))
self.assertEqual(it.shape["z"], (np.inf, 2))
def test_shape_list_one_group(self):
def _it():
for _ in range(100):
yield ([1], [2], [3])
it = Iterator(_it(), dtypes=np.dtype([("x", np.dtype("float"))]))
self.assertEqual(it.shape["x"], (np.inf, 3, 1))
def test_list_dtype(self):
l = [["a0", 0, "c0", 0], ["a1", 1, "c1", 1], ["a2", 2, "c2", 0]]
dtypes = np.dtype([("a", np.dtype(object)), ("b", np.dtype(int)), ("c", np.dtype(object)),
("d", np.dtype(bool))])
it = Iterator(l, dtypes=dtypes).batchs(3)
for e in it:
df_v = e.batch.to_df().values
array = np.asarray(l)
self.assertEqual((df_v[:, 0] == array[:, 0]).all(), True)
self.assertEqual((df_v[:, 1] == array[:, 1].astype(int)).all(), True)
self.assertEqual((df_v[:, 2] == array[:, 2]).all(), True)
self.assertEqual((df_v[:, 3] == array[:, 3].astype(bool)).all(), True)
def test_batch_iterator_from(self):
x = np.random.rand(20)
batch_size = 5
dtypes = np.dtype([("x", np.dtype(float)), ("y", np.dtype(float))])
def iterator(x):
init = 0
end = batch_size
while end <= x.shape[0]:
yield (x[init:end], x[init:end]+1)
init = end
end += batch_size
def conn_it(iterator, dtypes):
for it in iterator:
list_conn = ListConn([], dtypes)
list_conn[0] = it[0]
list_conn[1] = it[1]
yield list_conn
b_it = BatchIterator.from_batchs(conn_it(iterator(x), dtypes), length=len(x), from_batch_size=batch_size,
dtypes=dtypes, to_slice=True)
init = 0
end = batch_size
for e in b_it:
self.assertEqual((e.batch.to_ndarray()[:, 0] == x[init:end]).all(), True)
self.assertEqual((e.batch.to_ndarray()[:, 1] == x[init:end]+1).all(), True)
init = end
end += batch_size
class TestIteratorBatch(unittest.TestCase):
def test_iteration_batch(self):
array = np.arange(0, 10)
it = Iterator(array).batchs(chunks=(3, ))
for slice_obj in it:
self.assertEqual(type(slice_obj), Slice)
self.assertEqual((slice_obj.batch[it.groups[0]].to_ndarray() == array[slice_obj.slice]).all(), True)
def test_mixtype_batch(self):
array = [1, 2, 3, 4.0, 'xxx', 1, 3, 4, 5]
np_array = np.asarray(array, dtype=object)
it = Iterator(array, dtypes=np.dtype([(DEFAUL_GROUP_NAME, np.dtype("object"))])).batchs(chunks=(3, ))
for slice_obj in it:
self.assertEqual((slice_obj.batch[it.groups[0]].to_ndarray() == np_array[slice_obj.slice]).all(), True)
def test_mixtype_multidim_batch(self):
array = [1, 2, 3, 4.0, 'xxx', [1], [[2, 3]]]
np_array = np.asarray(array, dtype=object)
it = Iterator(array, dtypes=np.dtype([(DEFAUL_GROUP_NAME, np.dtype("object"))])).batchs(chunks=(3, ))
for slice_obj in it:
self.assertEqual((slice_obj.batch[it.groups[0]].to_ndarray() == np_array[slice_obj.slice]).all(), True)
def test_batch_dtype(self):
array = np.random.rand(10, 2)
dtypes = np.dtype([(DEFAUL_GROUP_NAME, np.dtype("float")), ("g1", np.dtype("float"))])
it = Iterator(array, dtypes=dtypes).batchs(chunks=(3, ))
for slice_obj in it:
self.assertEqual((slice_obj.batch["g1"].to_ndarray() == array[:, 1][slice_obj.slice]).all(), True)
self.assertEqual((slice_obj.batch[DEFAUL_GROUP_NAME].to_ndarray() == array[:, 0][slice_obj.slice]).all(), True)
def test_batch_it_attrs(self):
df = pd.DataFrame({"x": np.arange(0, 10), "y": np.arange(10, 20)})
it = Iterator(df).batchs(chunks=(3, ))
self.assertEqual(it.dtype, int)
self.assertEqual(it.length, 10)
self.assertEqual(it.shape, (10, 2))
self.assertEqual(it.batch_size, 3)
self.assertEqual(it.num_splits(), 4)
self.assertEqual(it.batch_shape(), [3, 2])
self.assertEqual((it.groups == df.columns.values).all(), True)
def test_batch_it_attrs_length(self):
it = Iterator(stream()).batchs(chunks=(3, ))
self.assertEqual(it.dtype, int)
self.assertEqual(it.dtypes, [(DEFAUL_GROUP_NAME, np.dtype('int64'))])
self.assertEqual(it.length, np.inf)
self.assertEqual(it.shape, (np.inf,))
self.assertEqual(it.batch_size, 3)
self.assertEqual(it.num_splits(), 0)
self.assertEqual(it.batch_shape(), [3])
self.assertEqual(it.groups, (DEFAUL_GROUP_NAME,))
def test_shape(self):
data = np.random.rand(10)
it = Iterator(data).batchs(chunks=(2, ))
self.assertEqual(it.shape, (10,))
data = np.random.rand(10, 3)
it = Iterator(data).batchs(chunks=(2, ))
self.assertEqual(it.shape, (10, 3))
data = np.random.rand(10, 3, 3)
it = Iterator(data).batchs(chunks=(2, ))
self.assertEqual(it.shape, (10, 3, 3))
def test_batchs_values(self):
batch_size = 3
m = np.asarray([[1, '5.0'], [2, '3'], [4, 'C']])
data = pd.DataFrame(m, columns=['A', 'B'])
it = Iterator(data).batchs(chunks=(batch_size, ))
self.assertEqual(it.batch_size, batch_size)
for smx in it:
for i, row in enumerate(smx.batch):
self.assertEqual((row.to_ndarray()[0] == m[i]).all(), True)
def test_df_batchs(self):
batch_size = 2
data = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float')
it = Iterator(data, dtypes=np.dtype([('x', np.dtype('float'))])).batchs(chunks=(batch_size, ))
batch = next(it).batch
self.assertCountEqual(batch['x'].to_ndarray(), [1, 2])
def test_unique(self):
it = Iterator([1, 2, 3, 4, 4, 4, 5, 6, 3, 8, 1])
counter = it.batchs(3).unique()
self.assertEqual(counter[1], 2)
self.assertEqual(counter[2], 1)
self.assertEqual(counter[3], 2)
self.assertEqual(counter[4], 3)
self.assertEqual(counter[5], 1)
self.assertEqual(counter[6], 1)
self.assertEqual(counter[8], 1)
def test_buffer(self):
v = list(range(100))
it = Iterator(v)
buffer_size = 7
i = 0
j = buffer_size
for elems in it.batchs(chunks=(buffer_size, )):
self.assertCountEqual(elems.batch[DEFAUL_GROUP_NAME].to_ndarray(), list(range(i, j)))
i = j
j += buffer_size
if j > 100:
j = 100
def test_iterator_cut(self):
array = np.arange(0, 100)
it = Iterator(array).batchs(chunks=(3, ))
for slice_obj in it[:10]:
self.assertEqual((slice_obj.batch[it.groups[0]].to_ndarray() == array[slice_obj.slice]).all(), True)
def test_flat_all_batch(self):
array = np.empty((20, 2), dtype=np.dtype(int))
array[:, 0] = np.arange(0, 20)
array[:, 1] = np.arange(0, 20) + 1
it = Iterator(array).batchs(chunks=(3, 2))
flat_array = array.reshape(-1)
for i, e in enumerate(it.flat()):
self.assertEqual(e, flat_array[i])
def test_clean_batchs(self):
it = Iterator(((i, 'X', 'Z') for i in range(20))).batchs(chunks=(2, 3))
for i, smx in enumerate(it.clean_batchs()):
self.assertEqual((smx[DEFAUL_GROUP_NAME].to_ndarray() == np.asarray([i, 'X', 'Z'], dtype=object)).all(), True)
def test_sample_batch(self):
order = (i for i in range(20))
array = np.arange(0, 20)
it = Iterator(order).batchs(chunks=(2, ))
samples = []
samples_it = it.sample(5, col=DEFAUL_GROUP_NAME)
self.assertEqual(isinstance(samples_it, Iterator), True)
for e in samples_it:
samples.append(e)
self.assertEqual((samples == array[:5]).all(), False)
def test_one_elem(self):
data = [[1, 2, 'a', 's'], [2, 3, 'c', 'e']]
dtypes = np.dtype([("a", int), ("b", int), ("c", str), ("s", str)])
data = Iterator(data, dtypes=dtypes)
ok_shape = {'a': (2, ), 'b': (2, ), 'c': (2, ), 's': (2, )}
self.assertEqual(data.shape.items(), ok_shape.items())
data = [[1, 2, 'a', 's'], [2, 3, 'c', 'e']]
data = Iterator(data)
ok_shape = {'g0': (2, 4)}
self.assertEqual(data.shape.items(), ok_shape.items())
data = [1, 2, 'a', 's']
dtypes = np.dtype([("a", int), ("b", int), ("c", str), ("s", str)])
data = Iterator(data, dtypes=dtypes)
ok_shape = {'a': (1, ), 'b': (1, ), 'c': (1, ), 's': (1, )}
self.assertEqual(data.shape.items(), ok_shape.items())
data = [1, 2, 'a', 's']
data = Iterator(data)
ok_shape = {'g0': (4, )}
self.assertEqual(data.shape.items(), ok_shape.items())
class TestIteratorToData(unittest.TestCase):
def test_length_array_batch(self):
array = np.asarray([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
[13, 14], [15, 16], [17, 18], [19, 20]])
it = Iterator(array).batchs(chunks=(3, 2))
with Data(name="test") as data:
data.from_data(it[:10])
self.assertEqual((data[:5].to_ndarray() == array[:5]).all(), True)
def test_stream(self):
it = Iterator(stream())
with Data(name="test", chunks=(3, )) as data:
data.from_data(it[:10])
self.assertCountEqual(data.to_ndarray(), np.arange(0, 10))
def test_stream_batchs(self):
it = Iterator(stream()).batchs(chunks=(3, ))
with Data(name="test") as data:
data.from_data(it[:10])
self.assertCountEqual(data.to_ndarray(), np.arange(0, 10))
def test_multidtype(self):
x0 = np.arange(20)
x1 = (x0 + 1).astype("float")
x2 = x0 + 2
df = pd.DataFrame({"x0": x0, "x1": x1, "x2": x2})
with Data(name="test", chunks=Chunks({"x0": (10, ), "x1": (10, ), "x2": (10, )})) as data:
data.from_data(df)
self.assertEqual((data["x0"][:5].to_ndarray() == x0[:5]).all(), True)
self.assertEqual((data["x1"][:5].to_ndarray() == x1[:5]).all(), True)
self.assertEqual((data["x2"][:5].to_ndarray() == x2[:5]).all(), True)
self.assertEqual(data["x0"].dtype, np.dtype(int))
self.assertEqual(data["x1"].dtype, np.dtype(float))
def test_structured_batchs(self):
x0 = np.zeros(20) + 1
x1 = np.zeros(20) + 2
x2 = np.zeros(20) + 3
df = pd.DataFrame({"x0": x0, "x1": x1, "x2": x2})
with Data(name="test", chunks=(3, )) as data:
data.from_data(df)
self.assertEqual((data["x0"].to_ndarray() == x0).all(), True)
self.assertEqual((data["x1"].to_ndarray() == x1).all(), True)
self.assertEqual((data["x2"].to_ndarray() == x2).all(), True)
def test_multidtype_batchs(self):
x0 = np.zeros(20) + 1
x1 = (np.zeros(20) + 2).astype("int")
x2 = np.zeros(20) + 3
df = pd.DataFrame({"x0": x0, "x1": x1, "x2": x2})
with Data(name="test", chunks=(3, )) as data:
data.from_data(df)
self.assertEqual(data.shape, (20, 3))
self.assertEqual(data["x0"].dtypes["x0"], float)
self.assertEqual(data["x1"].dtypes["x1"], int)
self.assertEqual(data["x2"].dtypes["x2"], float)
def test_to_ndarray_dtype(self):
batch_size = 2
array = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='int')
it = Iterator(array).batchs(batch_size)
with Data(name="test", chunks=(batch_size, )) as data:
data.from_data(it)
array = data.to_ndarray(dtype='float')
self.assertEqual(array.dtype, np.dtype("float"))
data.destroy()
def test_sample_weight(self):
def fn(v):
if v == 0:
return 1
else:
return 90
num_items = 200000
num_samples = 20000
array = np.zeros((num_items, 4)) + [1, 2, 3, 0]
array[:, 3] = np.random.rand(1, num_items) > .5
it = Iterator(array).sample(num_samples, col=3, weight_fn=fn)
with Data(name="test", chunks=(258, 4)) as data:
data.from_data(it)
c = collections.Counter(data.to_ndarray()[:, 3])
self.assertEqual(c[1]/float(num_samples) > .79, True)
def test_empty(self):
it = Iterator([])
with Data(name="test", chunks=(1, )) as data:
data.from_data(it)
self.assertCountEqual(data.to_ndarray(), np.asarray([]))
self.assertEqual(data.shape, (0,))
data.destroy()
def test_datetime(self):
m = [datetime.datetime.today(), datetime.datetime.today(), datetime.datetime.today()]
df = pd.DataFrame(m, columns=['A'])
it = Iterator(m, dtypes=np.dtype([("A", np.dtype('<M8[ns]'))])).batchs(chunks=(2, ))
with Data(name="test") as data:
data.from_data(it)
self.assertCountEqual(data.to_ndarray(), df.values)
def test_abstractds(self):
array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
with Data(name="test", chunks=(3, )) as data:
data.from_data(array)
it = Iterator(data)
for it_array, array_elem in zip(it, array):
self.assertEqual(it_array.to_ndarray(), array_elem)
data.destroy()
def test_batch_ads(self):
with Data(name="test", chunks=(3, )) as data:
data.from_data(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
array_l = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
it = Iterator(data).batchs(chunks=(3, 1))
for batch, array in zip(it.only_data(), array_l):
self.assertCountEqual(batch, array)
data.destroy()
class TestIteratorFromData(unittest.TestCase):
def test_da_group(self):
x = np.random.rand(10)
with Data(name="test", chunks=(5, )) as data:
data.from_data(x)
it = Iterator(data)
self.assertEqual(it.shape, (10,))
self.assertEqual([(g, d) for g, (d, _) in it.dtypes.fields.items()], [(DEFAUL_GROUP_NAME, np.dtype(float))])
def test_da_group_it(self):
x = np.random.rand(10)
with Data(name="test", chunks=(5, )) as data:
data.from_data(x)
it = Iterator(data)
for i, e in enumerate(it):
self.assertEqual(e.to_ndarray(), x[i])
def test_da_group_it_batch(self):
x = np.random.rand(10)
with Data(name="test", chunks=(5, )) as data:
data.from_data(x)
it = Iterator(data).batchs(chunks=(5, ))
for e in it:
self.assertEqual((e.batch.to_ndarray() == x[e.slice]).all(), True)
class TestIteratorLoop(unittest.TestCase):
def test_cycle_it(self):
array = np.arange(10)
with Data(name="test", chunks=(3, )) as data:
data.from_data(array)
it = Iterator(data).cycle()[:20]
elems = []
for e in it:
elems.append(e.to_ndarray())
self.assertEqual(elems, list(range(10))*2)
def test_it2iter(self):
x_array = np.random.rand(10)
y_array = np.random.rand(10)
z_array = np.random.rand(10)
dagroup_dict = GroupManager.convert({"x": x_array, "y": y_array, "z": z_array},
chunks=Chunks({"x": (5, ), "y": (5, ), "z": (5, )}))
with Data(name="test") as data:
data.from_data(dagroup_dict)
it = Iterator(data).batchs(chunks=(1, )).cycle().to_iter()
for i, x_y_z in enumerate(it):
self.assertEqual(x_y_z[0][0], x_array[i])
self.assertEqual(x_y_z[0][1], y_array[i])
self.assertEqual(x_y_z[0][2], z_array[i])
break
def test_cycle_it_batch_cut(self):
x = range(10)
with Data(name="test", chunks=(3, )) as data:
data.from_data(x)
it = Iterator(data).batchs(chunks=(3, )).cycle()[:22]
elems = []
for e in it:
elems.append(e.batch.to_ndarray())
self.assertCountEqual(elems[0], [0, 1, 2])
self.assertCountEqual(elems[3], [9])
self.assertCountEqual(elems[4], [0, 1, 2])
self.assertCountEqual(elems[7], [9])
self.assertCountEqual(elems[8], [0, 1])
def test_from_batchs_to_iterator(self):
def _it():
for _ in range(100):
e = np.random.rand(3, 3)
yield (e, e)
it = BatchIterator.from_batchs(_it(), length=100,
dtypes=np.dtype([("x", np.dtype("float")), ("y", np.dtype("float"))]),
from_batch_size=3)
self.assertEqual(it.shape["x"], (100, 3))
self.assertEqual(it.shape["y"], (100, 3))
def chunk_sizes(seq):
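    # Materialise each chunk and return the list of chunk lengths, so the TestSeq
    # cases below can assert on how grouper_chunk splits its input.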
return [len(list(row)) for row in seq]
class TestSeq(unittest.TestCase):
def setUp(self):
self.X = np.random.rand(10, 10)
def test_grouper_chunk_3(self):
seq = grouper_chunk(3, self.X)
self.assertEqual(chunk_sizes(seq), [3, 3, 3, 1])
def test_grouper_chunk_2(self):
seq = grouper_chunk(2, self.X)
self.assertEqual(chunk_sizes(seq), [2, 2, 2, 2, 2])
def test_grouper_chunk_10(self):
seq = grouper_chunk(10, self.X)
self.assertEqual(chunk_sizes(seq), [10])
def test_grouper_chunk_1(self):
seq = grouper_chunk(1, self.X)
self.assertEqual(chunk_sizes(seq), [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
def test_grouper_chunk_7(self):
seq = grouper_chunk(7, self.X)
self.assertEqual(chunk_sizes(seq), [7, 3])
|
from flask import request, g, jsonify
from flask_cors import cross_origin
from alerta.auth.decorators import permission
from alerta.exceptions import ApiError, RejectException
from alerta.models.alert import Alert
from alerta.utils.api import process_alert, add_remote_ip, assign_customer
from . import webhooks
# {
# "second_probe": {},
# "check_type": "HTTP",
# "first_probe": {},
# "tags": [],
# "check_id": 803318,
# "current_state": "DOWN",
# "check_params": {
# "url": "/",
# "encryption": false,
# "hostname": "api.alerta.io",
# "basic_auth": false,
# "port": 80,
# "header": "User-Agent:Pingdom.com_bot_version_1.4_(http://www.pingdom.com/)",
# "ipv6": false,
# "full_url": "http://api.alerta.io/"
# },
# "previous_state": "UP",
# "check_name": "Alerta API on OpenShift",
# "version": 1,
# "state_changed_timestamp": 1498859836,
# "importance_level": "HIGH",
# "state_changed_utc_time": "2017-06-30T21:57:16",
# "long_description": "This is a test message triggered by a user in My Pingdom",
# "description": "test"
# }
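# Severity mapping used below: importance_level HIGH -> critical, anything else ->
# warning; a current_state of UP always overrides this and clears the alert to normal.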
def parse_pingdom(check):
if check['importance_level'] == 'HIGH':
severity = 'critical'
else:
severity = 'warning'
if check['current_state'] == 'UP':
severity = 'normal'
return Alert(
resource=check['check_name'],
event=check['current_state'],
correlate=['UP', 'DOWN'],
environment='Production',
severity=severity,
service=[check['check_type']],
group='Network',
value=check['description'],
text='%s: %s' % (check['importance_level'], check['long_description']),
tags=check['tags'],
attributes={'checkId': check['check_id']},
origin='Pingdom',
event_type='availabilityAlert',
raw_data=check
)
@webhooks.route('/webhooks/pingdom', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission('write:webhooks')
def pingdom():
try:
incomingAlert = parse_pingdom(request.json)
except ValueError as e:
raise ApiError(str(e), 400)
incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of pingdom check failed", 500)
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Oct 31, 2020
@author: Andrea Alfonsi
comments: Interface for the Projectile code that avoids the creation of
a CSV and instead transfers the data directly to RAVEN in the
finalizeCodeOutput method
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import numpy
from CodeInterfaceBaseClass import CodeInterfaceBase
from ProjectileInterface import Projectile
class ProjectileNoCSV(Projectile):
"""
Provides code to interface RAVEN to Projectile without the need of a CSV
The only method that changes is the finalizeCodeOutput (we return the data directly)
"""
def finalizeCodeOutput(self, command, output, workingDir):
"""
Called by RAVEN to modify output files (if needed) so that they are in a proper form.
In this case, the data are directly returned as a dictionary
@ In, command, string, the command used to run the just ended job
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, output, dict, the dictionary containing the data
"""
# open output file
outfileName = os.path.join(workingDir,output+".txt" )
headers, data = self._readOutputData(outfileName)
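    # transpose so that each row of 'dat' holds the values of one output variable,
    # in the same order as 'headers'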
dat = numpy.asarray(data).T
output = {var:dat[i,:] for (i, var) in enumerate(headers)}
return output
|
from player.layer import ID_LAYER
from base import BaseTestCase
class TestOrder(BaseTestCase):
_auto_include = False
_settings = {'layer.order.test': 'l1 l2 l3'}
def test_custom_dir(self):
self.config.add_layer(
'test', 'l1', path='player:tests/dir1/')
self.config.add_layer(
'test', 'l2', path='player:tests/bundle/dir1/')
self.config.commit()
storage = self.registry.get(ID_LAYER)
self.assertIn('test', storage)
self.assertEqual(2, len(storage['test']))
self.assertEqual('l1', storage['test'][0]['name'])
self.assertEqual('l2', storage['test'][1]['name'])
class TestOrderUnknown(BaseTestCase):
_auto_include = False
_settings = {'layer.order.test2': 'l1 l2 l3'}
def test_custom_dir(self):
self.config.add_layer(
'test', 'l1', path='player:tests/dir1/')
self.config.add_layer(
'test', 'l2', path='player:tests/bundle/dir1/')
self.config.commit()
storage = self.registry.get(ID_LAYER)
self.assertIn('test', storage)
self.assertNotIn('test2', storage)
|
import warnings
from ._conf import PYRAMID_PARAMS
from ._funcs import _get_crs, _verify_shape_bounds
from ._types import Bounds, Shape
class GridDefinition(object):
"""Object representing the tile pyramid source grid."""
def __init__(
self, grid=None, shape=None, bounds=None, srs=None, is_global=False, **kwargs
):
if isinstance(grid, str) and grid in PYRAMID_PARAMS:
self.type = grid
self.shape = Shape(*PYRAMID_PARAMS[grid]["shape"])
self.bounds = Bounds(*PYRAMID_PARAMS[grid]["bounds"])
self.is_global = PYRAMID_PARAMS[grid]["is_global"]
self.crs = _get_crs(PYRAMID_PARAMS[grid]["srs"])
self.left, self.bottom, self.right, self.top = self.bounds
elif grid is None or grid == "custom":
for i in ["proj", "epsg"]:
if i in kwargs:
srs = {i: kwargs[i]} if srs is None else srs
warnings.warn(
DeprecationWarning(
"'%s' should be packed into a dictionary and passed to "
"'srs'" % i
)
)
self.type = "custom"
_verify_shape_bounds(shape=shape, bounds=bounds)
self.shape = Shape(*shape)
self.bounds = Bounds(*bounds)
self.is_global = is_global
self.crs = _get_crs(srs)
self.left, self.bottom, self.right, self.top = self.bounds
# check if parameters match with default grid type
for default_grid_name in PYRAMID_PARAMS:
default_grid = GridDefinition(default_grid_name)
if self.__eq__(default_grid):
self.type = default_grid_name
elif isinstance(grid, dict):
if "type" in grid:
warnings.warn(
DeprecationWarning("'type' is deprecated and should be 'grid'")
)
if "grid" not in grid:
grid["grid"] = grid.pop("type")
self.__init__(**grid)
elif isinstance(grid, GridDefinition):
self.__init__(**grid.to_dict())
else:
raise ValueError("invalid grid definition: %s" % grid)
@property
def srid(self):
warnings.warn(DeprecationWarning("'srid' attribute is deprecated"))
return self.crs.to_epsg()
def to_dict(self):
return dict(
bounds=self.bounds,
is_global=self.is_global,
shape=self.shape,
srs=dict(wkt=self.crs.to_wkt()),
type=self.type,
)
def from_dict(config_dict):
return GridDefinition(**config_dict)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.shape == other.shape and
self.bounds == other.bounds and
self.is_global == other.is_global and
self.crs == other.crs
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.type in PYRAMID_PARAMS:
return 'GridDefinition("%s")' % self.type
else:
return 'GridDefinition(' \
'"%s", ' \
'shape=%s, ' \
'bounds=%s, ' \
'is_global=%s, ' \
'srs=%s' \
')' % (
self.type,
tuple(self.shape),
tuple(self.bounds),
self.is_global,
self.crs
)
def __hash__(self):
return hash(repr(self))
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This method is used by the :class:`~.UVCC` Ansatz in order to construct its excitation operators. It
must be called for each type of excitation (singles, doubles, etc.) that is to be considered in the
Ansatz.
"""
from typing import Any, List, Tuple
import itertools
import logging
import operator
logger = logging.getLogger(__name__)
def generate_vibration_excitations(
num_excitations: int,
num_modals: List[int],
) -> List[Tuple[Tuple[Any, ...], ...]]:
"""Generates all possible excitations with the given number of excitations for the specified
number of particles distributed among the given number of spin orbitals.
This method assumes block-ordered spin-orbitals.
Args:
num_excitations: number of excitations per operator (1 means single excitations, etc.).
num_modals: the number of modals per mode.
Returns:
The list of excitations encoded as tuples of tuples. Each tuple in the list is a pair of
tuples. The first tuple contains the occupied spin orbital indices whereas the second one
contains the indices of the unoccupied spin orbitals.
"""
partial_sum_modals = list(itertools.accumulate(num_modals, operator.add))
# First, we construct the list of single excitations:
single_excitations = []
idx_sum = 0
for accumulated_sum in partial_sum_modals:
# the unoccupied modals in each mode are all modals but the lowest one:
unoccupied = list(range(idx_sum + 1, accumulated_sum))
# the single excitations for this mode are therefore simply each entry in this list, when
# excited into it from the lowest modal of this list:
single_excitations.extend([(idx_sum, m) for m in unoccupied])
# and now we update the running index of the lowest modal for the next mode
idx_sum = accumulated_sum
logger.debug("Generated list of single excitations: %s", single_excitations)
# we can find the actual list of excitations by doing the following:
    # 1. gather the single excitations found above for all modes
# 2. find all possible combinations of length `num_excitations`
pool = itertools.combinations(single_excitations, num_excitations)
excitations = []
visited_excitations = set()
for exc in pool:
# validate an excitation by asserting that all indices are unique:
# 1. get the frozen set of indices in the excitation
exc_set = frozenset(itertools.chain.from_iterable(exc))
# 2. all indices must be unique (size of set equals 2 * num_excitations)
# 3. and we also don't want to include permuted variants of identical excitations
if len(exc_set) == num_excitations * 2 and exc_set not in visited_excitations:
visited_excitations.add(exc_set)
exc_tuple = tuple(zip(*exc))
excitations.append(exc_tuple)
logger.debug("Added the excitation: %s", exc_tuple)
return excitations
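# A small worked example (not part of the original module): with two modes of two
# modals each, the only single excitations start from the lowest modal of each mode, so
#
#     generate_vibration_excitations(1, [2, 2])
#
# returns [((0,), (1,)), ((2,), (3,))].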
|
from six.moves import configparser
import torch
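# Expected layout of the config file parsed below (section and option names are taken
# from the config.get(...) calls; the values shown are illustrative only):
#
# [data paths]
# path_dataset = ./data
#
# [experiment name]
# name = example_run
#
# [training settings]
# N_epochs = 100
# batch_size = 32
# lr = 0.001
# flip_augmentation = True
# affine_augmentation = False
# mixture_augmentation = False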
def loadConfig(path):
#========= Load settings from Config file
config = configparser.RawConfigParser()
config.read(path)
#[data paths]
path_dataset = config.get('data paths', 'path_dataset')
#[experiment name]
name = config.get('experiment name', 'name')
#[training settings]
N_epochs = int(config.get('training settings', 'N_epochs'))
batch_size = int(config.get('training settings', 'batch_size'))
lr = float(config.get('training settings', 'lr'))
flip_augmentation = config.getboolean('training settings', 'flip_augmentation')
affine_augmentation = config.getboolean('training settings', 'affine_augmentation')
mixture_augmentation = config.getboolean('training settings', 'mixture_augmentation')
# hyperparameters
hyperparameters = {
"path_dataset": path_dataset,
"name": name,
"N_epochs": N_epochs,
"batch_size": batch_size,
"lr": lr,
"flip_augmentation": flip_augmentation,
"affine_augmentation": affine_augmentation,
"mixture_augmentation": mixture_augmentation,
"device": 'cuda' if torch.cuda.is_available() else 'cpu'
}
return hyperparameters
if __name__ == '__main__':
loadConfig('../config.txt')
|
def create_tables():
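    """Create the application's PostgreSQL schema (clients, vehicles, companies, users,
    products, sales, purchases, payment methods, pending payments) plus the add_prod()
    helper function. Returns True on success and False if the database connection fails."""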
comm = {"criar_cliente": """
CREATE TABLE IF NOT EXISTS cliente (
clie_id SERIAL PRIMARY KEY,
clie_cpf_cnpj VARCHAR(15) NOT NULL UNIQUE,
clie_nome VARCHAR(60) NOT NULL,
clie_fone VARCHAR(40),
clie_email VARCHAR(50) NOT NULL,
clie_rg VARCHAR(13),
clie_celular VARCHAR(25),
clie_tipo VARCHAR(20),
clie_rua VARCHAR(60) NOT NULL,
clie_bairro VARCHAR(60) NOT NULL,
clie_numero VARCHAR(10),
clie_cidade VARCHAR(50) NOT NULL,
clie_estado VARCHAR(2) NOT NULL,
clie_CEP varchar(15)
)
""",
"criar_veiculo": """
CREATE TABLE IF NOT EXISTS veiculo (
veic_placa VARCHAR(15) PRIMARY KEY,
veic_clie_id INT,
veic_marca VARCHAR(50) NOT NULL,
veic_modelo VARCHAR(60) NOT NULL,
CONSTRAINT fk_veic_clie
FOREIGN KEY(veic_clie_id)
REFERENCES cliente(clie_id)
ON DELETE CASCADE
)
""",
"criar_empresas": """
CREATE TABLE IF NOT EXISTS empresas (
emp_cnpj VARCHAR PRIMARY KEY,
emp_razaosocial VARCHAR(100) NOT NULL,
emp_nomefantasia VARCHAR(100) NOT NULL,
emp_inscricaoestadual VARCHAR(40),
emp_email VARCHAR(40) NOT NULL,
emp_fone VARCHAR(20) NOT NULL,
emp_site VARCHAR(100),
emp_rua VARCHAR(60) NOT NULL,
emp_bairro VARCHAR(60) NOT NULL,
emp_numero VARCHAR(10),
emp_cidade VARCHAR(50) NOT NULL,
emp_estado VARCHAR(2) NOT NULL,
emp_CEP varchar(15)
)
""",
"criar_usuarios": """
CREATE TABLE IF NOT EXISTS usuarios (
usu_id SERIAL PRIMARY KEY,
usu_emp_cnpj VARCHAR(20),
usu_login VARCHAR(255) NOT NULL UNIQUE,
usu_senha TEXT NOT NULL,
usu_nivel_acesso VARCHAR(20),
usu_cpf VARCHAR(15) NOT NULL,
usu_nome VARCHAR(60) NOT NULL,
usu_fone VARCHAR(40),
usu_email VARCHAR(50) NOT NULL,
usu_rg VARCHAR(13),
usu_celular VARCHAR(25),
CONSTRAINT fk_usu_emp
FOREIGN KEY(usu_emp_cnpj)
REFERENCES empresas(emp_cnpj)
ON DELETE CASCADE ON UPDATE CASCADE
)
""",
"criar_operador": """
CREATE TABLE IF NOT EXISTS operador (
ope_id INT PRIMARY KEY
)
""",
"criar_emp_img": """
CREATE TABLE IF NOT EXISTS emp_img (
emp_cnpj VARCHAR PRIMARY KEY,
img_ext VARCHAR(60) NOT NULL,
img_dados BYTEA NOT NULL,
FOREIGN KEY (emp_cnpj)
REFERENCES empresas (emp_cnpj)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_fornecedor": """
CREATE TABLE IF NOT EXISTS fornecedor (
forn_id SERIAL PRIMARY KEY,
forn_nome VARCHAR(50) NOT NULL,
forn_cnpj VARCHAR(20) NOT NULL,
forn_email VARCHAR(60) NOT NULL,
forn_fone VARCHAR(50),
forn_rua VARCHAR(60) NOT NULL,
forn_bairro VARCHAR(60) NOT NULL,
forn_numero VARCHAR(10),
forn_cidade VARCHAR(50) NOT NULL,
forn_estado VARCHAR(2) NOT NULL,
forn_CEP varchar(15)
)
""",
"criar_categoria": """
CREATE TABLE IF NOT EXISTS categoria (
cat_id SERIAL PRIMARY KEY,
cat_descricao VARCHAR(50)
)
""",
"criar_produtos": """
CREATE TABLE IF NOT EXISTS produtos (
prod_id SERIAL PRIMARY KEY,
prod_forn_id INT,
prod_cat_id INT,
prod_estoque INT,
prod_codbarras VARCHAR(15) NOT NULL,
prod_desc VARCHAR(50) NOT NULL,
prod_marca VARCHAR(60) NOT NULL,
prod_preco FLOAT NOT NULL,
CONSTRAINT fk_prod_forn
FOREIGN KEY(prod_forn_id)
REFERENCES fornecedor(forn_id)
ON DELETE CASCADE,
CONSTRAINT fk_prod_cat
FOREIGN KEY(prod_cat_id)
REFERENCES categoria(cat_id)
ON DELETE CASCADE
)
""",
"criar_prod_img": """
CREATE TABLE IF NOT EXISTS prod_img (
prod_id INTEGER PRIMARY KEY,
img_ext VARCHAR(60) NOT NULL,
img_dados BYTEA NOT NULL,
FOREIGN KEY (prod_id)
REFERENCES produtos (prod_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_serviços": """
CREATE TABLE IF NOT EXISTS servicos (
serv_id SERIAL PRIMARY KEY,
serv_desc VARCHAR(60) NOT NULL,
serv_preco FLOAT NOT NULL
)
""",
"criar_vendatmp": """
CREATE TABLE IF NOT EXISTS venda_tmp (
venda_cod_interno SERIAL PRIMARY KEY,
venda_clie_id INT,
venda_veic_placa VARCHAR(15),
venda_id INT NOT NULL,
venda_prod_serv_id INT NOT NULL,
venda_tipo VARCHAR(15) NOT NULL,
venda_qtd INT DEFAULT 1,
venda_valor FLOAT NOT NULL,
venda_desconto FLOAT,
venda_datahora TIMESTAMP,
FOREIGN KEY (venda_clie_id)
REFERENCES cliente (clie_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_venda": """
CREATE TABLE IF NOT EXISTS vendas_itens (
venda_cod_interno SERIAL PRIMARY KEY,
venda_id INT NOT NULL,
venda_prod_serv_id INT NOT NULL,
venda_tipo VARCHAR(15) NOT NULL,
venda_qtd INT NOT NULL,
venda_valor FLOAT NOT NULL,
venda_desconto FLOAT,
venda_datahora TIMESTAMP
)
""",
"criar_finalizadoras": """
CREATE TABLE IF NOT EXISTS finalizadoras (
fin_id SERIAL PRIMARY KEY,
fin_desc VARCHAR(60)
)
""",
"criar_fin_venda": """
CREATE TABLE IF NOT EXISTS vendas_fin (
vendas_fin_id SERIAL PRIMARY KEY,
fin_id INT,
vendas_id INT,
vendas_fin_valor FLOAT,
FOREIGN KEY (fin_id)
REFERENCES finalizadoras (fin_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_venda_cabeçalho": """
CREATE TABLE IF NOT EXISTS vendas (
venda_id INT PRIMARY KEY,
venda_clie_id INT,
venda_veic_placa VARCHAR(15),
venda_qtd_itens INT,
venda_total_descontos FLOAT,
venda_valor_total FLOAT,
venda_status VARCHAR(50),
venda_datahora TIMESTAMP,
FOREIGN KEY (venda_clie_id)
REFERENCES cliente (clie_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_compratmp": """
CREATE TABLE IF NOT EXISTS compra_tmp (
compra_cod_interno SERIAL PRIMARY KEY,
compra_forn_id INT,
compra_id INT NOT NULL,
compra_prod_id INT NOT NULL,
compra_qtd INT DEFAULT 1,
compra_valor FLOAT NOT NULL,
compra_datahora TIMESTAMP,
FOREIGN KEY (compra_forn_id)
REFERENCES fornecedor (forn_id)
ON UPDATE CASCADE ON DELETE CASCADE,
FOREIGN KEY (compra_prod_id)
REFERENCES produtos (prod_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_compra": """
CREATE TABLE IF NOT EXISTS compra_itens (
compra_cod_interno SERIAL PRIMARY KEY,
compra_id INT NOT NULL,
compra_prod_id INT NOT NULL,
compra_qtd INT NOT NULL,
compra_valor FLOAT NOT NULL,
compra_datahora TIMESTAMP,
FOREIGN KEY (compra_prod_id)
REFERENCES produtos (prod_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_fin_compra": """
CREATE TABLE IF NOT EXISTS compra_fin (
compra_fin_id SERIAL PRIMARY KEY,
fin_id INT,
compra_id INT,
compra_fin_valor FLOAT,
FOREIGN KEY (fin_id)
REFERENCES finalizadoras (fin_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_compra_cabeçalho": """
CREATE TABLE IF NOT EXISTS compras (
compra_id INT PRIMARY KEY,
compra_forn_id INT,
compra_qtd_itens INT,
compra_valor_total FLOAT,
compra_status VARCHAR(50),
compra_datahora TIMESTAMP,
FOREIGN KEY (compra_forn_id)
REFERENCES fornecedor (forn_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_pendencias": """
CREATE TABLE IF NOT EXISTS pendencias(
pend_id SERIAL PRIMARY KEY,
pend_clie_id INT,
pend_venda_id INT,
pend_veic_placa VARCHAR(15),
pend_datahora TIMESTAMP,
pend_valor FLOAT,
FOREIGN KEY (pend_clie_id)
REFERENCES cliente (clie_id)
ON UPDATE CASCADE ON DELETE CASCADE,
FOREIGN KEY (pend_venda_id)
REFERENCES vendas (venda_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"criar_function_prod": """
CREATE OR REPLACE FUNCTION add_prod(
descricao VARCHAR,
codbarra VARCHAR,
marca VARCHAR,
estoque INT,
preco FLOAT,
fornecedor_id INT,
categoria_id INT
) RETURNS INTEGER AS $$
DECLARE
f_prod_id INT;
BEGIN
INSERT INTO produtos (prod_forn_id, prod_cat_id, prod_estoque, prod_codbarras, prod_desc, prod_marca,
prod_preco) VALUES (fornecedor_id, categoria_id, estoque, codbarra, descricao, marca, preco)
RETURNING prod_id INTO f_prod_id;
RETURN f_prod_id;
END;
$$ LANGUAGE plpgsql;
"""
}
conn = None
import psycopg2
from Funcoes.configdb import Banco
params = Banco.get_params()
try:
conn = psycopg2.connect(**params)
except Exception as e:
print(e.__class__)
from Funcoes.utils import show_msg
show_msg("erro", "Erro", "Erro no banco de dados")
return False
else:
cur = conn.cursor()
for command in comm.values():
cur.execute(command)
conn.commit()
cur.close()
return True
finally:
if conn is not None:
conn.close()
|
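# Reads an integer n and prints the concatenation of 1..n (e.g. n = 5 prints 12345).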
if __name__ == '__main__':
n = int(input())
ans = ""
for i in range(1, n + 1):
ans += str(i)
print(ans)
|
# License: BSD 3-Clause
from .study import OpenMLStudy, OpenMLBenchmarkSuite
from .functions import (
get_study,
get_suite,
create_study,
create_benchmark_suite,
update_study_status,
update_suite_status,
attach_to_study,
attach_to_suite,
detach_from_study,
detach_from_suite,
delete_study,
delete_suite,
list_studies,
list_suites,
)
__all__ = [
"OpenMLStudy",
"OpenMLBenchmarkSuite",
"attach_to_study",
"attach_to_suite",
"create_benchmark_suite",
"create_study",
"delete_study",
"delete_suite",
"detach_from_study",
"detach_from_suite",
"get_study",
"get_suite",
"list_studies",
"list_suites",
"update_suite_status",
"update_study_status",
]
|
#!/usr/bin/env python3
"""
Instruction Format
<Function Type >
< DEST0 N ID > < DEST0 INDEX >
< DEST1 N ID > < DEST1 INDEX >
< DEST2 N ID > < DEST2 INDEX >
< SRC0 N ID > < SRC0 INDEX >
< SRC1 N ID > < SRC1 INDEX >
< SRC2 N ID > < SRC2 INDEX >
------------------------------
<Function Type> can be of the type:
FN_PASS : 0
FN_ADD : 1
FN_SUB : 2
FN_MUL : 3
FN_MAC : 4
FN_DIV : 5
FN_SQR : 6
FN_SIG : 7
FN_GAU : 8
------------------------------
<N ID> is Namespace ID. It can be of the type:
NAMESPACE_NULL : 0
NAMESPACE_WEIGHT : 1
NAMESPACE_DATA : 2
NAMESPACE_GRADIENT : 3
NAMESPACE_INTERIM : 4
NAMESPACE_META : 5
NAMESPACE_NEIGHBOR : 6 # [0] = PE_NEIGHBOR, [1] = PU_NEIGHBOR
NAMESPACE_BUS : 7 # [0] = PE_BUS, [1] = PU_BUS
------------------------------
<INDEX> means different things:
When N ID is weight, data, gradient, meta, interim: INDEX is the actual index in the namespace
When N ID is neighbor: INDEX [0] is neighboring PE id, or INDEX [1] is neighboring PU id
When N ID is bus: INDEX [0] is PE id using the pe bus, or INDEX [1] is PU id and PE id
"""
import json
from os import listdir
from os.path import isfile, join
from pe import Pe
from pu import Pu
from inst import Inst
from namespace import Ns_entry
from binary import get_peid
import sys
sys.path.insert(0, 'include')
import code
pe_objects = []
cycle = 1
max_cycle = 1
num_pes = 0
pes_per_pu = 0
op_bit = 0
ns_bit = 0
index_bit = 0
nn_nb_bit = 0
def read_config(path):
print("reading config file...", end='')
with open(path, 'r') as f:
config = f.read()
f.close()
config = json.loads(config)
global num_pes, pes_per_pu, op_bit, ns_bit, index_bit, nn_nb_bit
num_pes = config["num_pes"]
pes_per_pu = config["pes_per_pu"]
ns_size = config["namespace_size"]
ns_int_size = config["namespace_interim_size"]
op_bit = config["op_bit"]
ns_bit = config["ns_bit"]
index_bit = config["index_bit"]
nn_nb_bit = config["nn_nb_bit"]
for pe_id in range(num_pes):
pe = Pe(id=pe_id, ns_size=ns_size, ns_int_size=ns_int_size)
pe_objects.append(pe)
for pe_id, pe in enumerate(pe_objects):
if pe_id != len(pe_objects) - 1:
pe.prev = pe_objects[pe_id + 1]
else:
pe.prev = pe_objects[0]
print("done")
def init_namespace():
""" Fills in namespace buffer with initial data.
This function won't be needed once memory interface instructions are working.
It fills in dummy data for linear regression.
"""
print("initializing pe namespace...", end='')
pe0 = pe_objects[0]
pe1 = pe_objects[1]
pe2 = pe_objects[2]
pe3 = pe_objects[3]
pe0.nw.insert(0, Ns_entry(data=1))
pe0.nd.insert(0, Ns_entry(data=3))
pe1.nw.insert(0, Ns_entry(data=2))
pe1.nd.insert(0, Ns_entry(data=4))
pe2.nw.insert(0, Ns_entry(data=3))
pe2.nd.insert(0, Ns_entry(data=5))
pe3.nd.insert(0, Ns_entry(data=4))
pe0.nw.insert(1, Ns_entry(data=1))
pe0.nd.insert(1, Ns_entry(data=3))
pe1.nw.insert(1, Ns_entry(data=2))
pe1.nd.insert(1, Ns_entry(data=4))
pe2.nw.insert(1, Ns_entry(data=3))
pe2.nd.insert(1, Ns_entry(data=5))
pe0.nm.insert(0, Ns_entry(data=1))
pe1.nm.insert(0, Ns_entry(data=1))
pe2.nm.insert(0, Ns_entry(data=1))
print("done")
def load_inst(bin_files):
print("loading instructions...", end='')
for bin_file in bin_files:
with open(bin_file, 'r') as f:
binary_stream = f.read()
binary_lines = separate_stream(binary_stream)
inst_list = []
for line in binary_lines:
inst = bin_decode(line)
inst_list.append(inst)
peid = get_peid(bin_file)
pe = pe_objects[peid]
pe.inst = inst_list
print("done")
def set_maxcycle():
global max_cycle
pe0 = pe_objects[0]
max_cycle = len(pe0.inst)
def separate_stream(binary_stream):
binary_lines = []
bin_line = ''
for char in binary_stream:
if char != '\n':
bin_line += char
else:
binary_lines.append(bin_line)
bin_line = ''
return binary_lines
def bin_decode(bin_line):
if bin_line == '0':
inst = Inst()
d = {
"op": "pass",
"dests": [],
"srcs": []
}
inst.fromDict(d)
return inst
op_bits = bin_line[:op_bit]
dests = []
srcs = []
op = code.op_inv[int(op_bits, 2)]
start_point = op_bit
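    # the rest of the line is six fields -- three destinations followed by three
    # sources -- each occupying ns_bit + index_bit + nn_nb_bit bits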
for i in range(3):
ns_bits = bin_line[start_point:start_point + ns_bit]
ns = code.ns_inv[int(ns_bits, 2)]
if ns == "NN":
pe_pu_bit = bin_line[start_point + ns_bit + index_bit + nn_nb_bit - 1]
if pe_pu_bit == '0':
ns = "NN0_out"
else:
ns = "NN1_out"
elif ns == "NB":
pe_pu_bit = bin_line[start_point + ns_bit + index_bit + nn_nb_bit - 1]
if pe_pu_bit == '0':
ns = "NB0_out"
else:
ns = "NB1_out"
index_bits = bin_line[start_point + ns_bit:start_point + ns_bit + index_bit]
index = int(index_bits, 2)
dest = {
"dest_nid": ns,
"dest_index": index
}
dests.append(dest)
start_point += (ns_bit + index_bit + nn_nb_bit)
for i in range(3):
ns_bits = bin_line[start_point:start_point + ns_bit]
ns = code.ns_inv[int(ns_bits, 2)]
if ns == "NN":
pe_pu_bit = bin_line[start_point + ns_bit + index_bit + nn_nb_bit - 1]
if pe_pu_bit == '0':
ns = "NN0_in"
else:
ns = "NN1_in"
elif ns == "NB":
pe_pu_bit = bin_line[start_point + ns_bit + index_bit + nn_nb_bit - 1]
if pe_pu_bit == '0':
ns = "NB0_in"
else:
ns = "NB1_in"
index_bits = bin_line[start_point + ns_bit:start_point + ns_bit + index_bit]
index = int(index_bits, 2)
src = {
"src_nid": ns,
"src_index": index
}
srcs.append(src)
start_point += (ns_bit + index_bit + nn_nb_bit)
inst = Inst()
d = {
"op": op,
"dests": dests,
"srcs": srcs
}
inst.fromDict(d)
return inst
def print_welcome():
print()
print("Welcome to TABLA Simulator!")
print()
def print_help():
print("***** HELP MENU *****")
print("Enter 'r' to run instructions in the next cycle.")
print("Enter 'p' to print the status of PEs and PUs.")
print("Enter 'h' for this help message.")
print("Enter 'q' to quit.")
def print_exit():
print("Simulator exited.")
def print_stat(pe_pu_list=None):
print("***** STATUS *****")
global num_pes, pes_per_pu, pe_objects, cycle, max_cycle
if pe_pu_list is None or len(pe_pu_list) == 0:
print("Number of PE's:", num_pes)
print("Number of PE's-per-PU:", pes_per_pu)
print("Next cycle:", cycle)
print("Total cycles:", max_cycle)
elif pe_pu_list is not None:
for pe_pu in pe_pu_list:
if pe_pu[:2] == "pe":
if pe_pu[2:].isdigit():
pe_id = int(pe_pu[2:])
pe = pe_objects[pe_id]
print(pe)
elif pe_pu[:2] == "pu":
pass
else:
raise Exception("Invalid pe pu arguments. Enter either pe or pu.")
def run():
""" Runs one instruction in each PE in a specific cycle. """
global cycle
print("Running cycle {:d} instructions...".format(cycle), end='')
for pe in pe_objects:
inst_ptr = cycle - 1
inst = pe.inst[inst_ptr]
#print(pe.id)
#print(inst)
pe.exec_inst(inst)
cycle += 1
print("done!")
bin_path = "./bin/"
prompt = ">> "
config_path = "./config.json"
if __name__ == '__main__':
bin_files = [join(bin_path, f) for f in listdir(bin_path) if isfile(join(bin_path, f))]
read_config(config_path)
init_namespace()
load_inst(bin_files)
set_maxcycle()
print_welcome()
print_stat()
print()
print_help()
kbd_in = input(prompt)
while kbd_in != 'q':
if kbd_in == '':
pass
elif kbd_in == 'h':
print_help()
elif kbd_in == 'r':
if cycle > max_cycle:
print("[WARNING] All instructions complete.")
kbd_in = input(prompt)
continue
else:
run()
elif kbd_in[0] == 'p':
pe_pu_ids = kbd_in.split()
print_stat(pe_pu_ids[1:])
else:
print("[ERROR] Invalid option.")
print_help()
kbd_in = input(prompt)
print_exit()
|
""" Crie um programa que leia nome e duas notas de vários alunos e guarte tudo em uma lista composta.
No final, mostre um boletim contendo a média de cada um e permita que o usuário possa mostrar as notas
de cada aluno individualmente."""
lista_main = []
while True:
nome = str(input('Nome: '))
nota1 = float(input('nota 1: '))
nota2 = float(input('Nota 2: '))
    media = (nota1 + nota2) / 2
lista_main.append([nome, [nota1, nota2], media])
resp = str(input('Quer continuar?[S/N] '))
if resp in 'Nn':
break
print('-'*22)
print(f'{"nª":<4}{"nome":<10}{"média":>8}')
print('-'*22)
for indice, aluno in enumerate(lista_main):
print(f'{indice:<4}{aluno[0]:<10}{aluno[2]:>8.1f}')
while True:
print('-'*30)
opc = int(input('Mostrar notas de qual aluno? (digite "999" para finalizar)'))
if opc == 999:
print('FINALIZANDO...')
break
if opc <= len(lista_main) - 1:
print(f'Notas de {lista_main[opc][0]} são {lista_main[opc][1]}')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Flavor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=12)),
('memory', models.IntegerField()),
('vcpu', models.IntegerField()),
('disk', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
]
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# factories
from .Project import Project as project
# end of file
|
from flask import Flask
from flask import request
from flask import send_from_directory
from flask.wrappers import Response
import json
app = Flask(__name__)
from flask import send_file
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import time
from multiprocessing import Process, Value
import subprocess
import random
import os
import sys
sys.path.append(os.getcwd())
import motionmodule
import cameramodule
import tiltModule
import buzzModule
import temperatureModule
import rgbLedModule
from camera import VideoCamera
cred = credentials.Certificate("firebase.json")
firebase = firebase_admin.initialize_app(cred, {
'databaseURL': 'https://hacksociety-rpi.firebaseio.com'
})
def getTemperatureRange():
ref=db.reference('temperatureRange')
return ref.get()
def motionDetected(flag):
ref = db.reference('motion')
ref.update({
'motionDetected': flag
})
@app.route('/', methods=['GET'])
def index():
ref = db.reference('data')
getTemperatureRange()
return Response(json.dumps(ref.get()), 200)
@app.route('/image', methods=['GET'])
def get_image():
cam = cameramodule.cameramodule()
return send_file("img.jpg", mimetype='image/gif')
@app.route('/base/<filename>', methods=['GET'])
def base_static(filename):
return send_from_directory(app.root_path + '/', "img.jpg")
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video')
def video_feed():
return Response(gen(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/startVideo', methods=['GET'])
def startVideo():
return str(os.system("sudo service motion start"))
@app.route('/stopVideo', methods=['GET'])
def stopVideo():
return str(os.system("sudo service motion stop"))
def updateTemp():
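    # Push the current temperature reading to Firebase and drive the RGB LED:
    # blue below the configured minimum, green inside [min, max), red otherwise.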
print("TEMP")
on = 'g'
ref = db.reference('temperature')
t = temperatureModule.temperature()
temp = t.getTemperatureFormatted()
ref.update({
'current': temp
})
rng = getTemperatureRange()
rgb = rgbLedModule.rgbModule()
if (float(temp) < rng['min']):
print("b")
if (on != 'b'):
rgb.turnOffLed(on)
rgb.turnOnLed('b')
on = 'b'
elif (float(temp) >= rng['min'] and float(temp) < rng['max']):
print("g")
if (on != 'g'):
rgb.turnOffLed(on)
rgb.turnOnLed('g')
on = 'g'
else:
print("r")
if (on != 'r'):
rgb.turnOffLed(on)
rgb.turnOnLed('r')
on = 'r'
def updateTilt():
t = tiltModule.tilt()
value = t.getValue()
ref = db.reference('tilt')
ref.update({
'value': value
})
if (value == 1):
b = buzzModule.buzz()
b.start()
time.sleep(1)
b.stop()
def runLoopTemp():
while True:
pass
updateTemp()
def runLoop():
while True:
pass
updateTemp()
m = motionmodule.motion()
motionDetected(m.getValue())
updateTilt()
if __name__ == '__main__':
p = Process(target=runLoop)
p2 = Process(target=runLoopTemp)
p.start()
p2.start()
    app.run(host='0.0.0.0', port=5000, debug=False)
p.join()
p2.join()
|
##########################################################################
## Prediction Package Tests
##########################################################################
# to execute tests, run from *project* root. This runs all test packages
# (this one and any other in the /tests folder)
#
# nosetests --verbosity=2 --with-coverage --cover-inclusive --cover-erase tests
#
# for a list of available asserts:
# https://docs.python.org/2/library/unittest.html#assert-methods
##########################################################################
## Imports
##########################################################################
import unittest
from unittest import skip
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.utils import Bunch  # needed by the work-in-progress reloading test below
#Example import from our data structure
#from prediction.my_file import MyObjectName
##########################################################################
## Tests
##########################################################################
class PredictionTests(unittest.TestCase):
def test_can_run_tests(self):
"""
Make sure that nose tests works for test_prediction.py
"""
assert(True)
@skip('Test not written yet')
def test_can_predict_risk(self):
"""
Just an example of a test we might want to write.
"""
pass
@skip('Work in progress')
def test_many_models_handles_reloading(self):
import train_models
modeler = train_models.ManyModels()
dataset = Bunch('stuff') #need to get this to actually load a bunch of data
X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.33, random_state=1)
modeler.X = X_train
modeler.y = y_train
#Attach our unfitted model instances to the modeler instance
kn12 = KNeighborsClassifier(n_neighbors=12)
kn6 = KNeighborsClassifier(n_neighbors=6)
rf = RandomForestClassifier()
modeler.models = {"KNeighborsClassifier_6": kn6
, "KNeighborsClassifier_12": kn12
, "RandomForestClassifier": rf
}
#In another test, make sure all 3 methods work
modeler.fit("KNeighborsClassifier_6") #fit just one model
modeler.fit(model_list=['KNeighborsClassifier_12', 'RandomForestClassifier']) #fit a list of models
modeler.fit() #fits all models
modeler.X_test = X_test
predicted_df = modeler.predict(model_list=['KNeighborsClassifier_12'])
assert(modeler.answers.head()) #should have content
modeler.X_test = X_test
assert(modeler.answers.head()) #should be empty dataframe
predicted_df = modeler.predict(model_list=['KNeighborsClassifier_6','KNeighborsClassifier_12', 'RandomForestClassifier'])
assert(modeler.answers.head()) #should have content again
|
import functools
import itertools
import re
from ispyb import sqlalchemy
# If uuid is ever replaced with a simple counter, do not include 0 in the count: a 0 id would break some boolean checks against None.
WrapperID = itertools.count(1)
class Table:
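    """Simple column-wise, in-memory table.

    Columns listed in 'unique' are used to detect and merge duplicate rows, 'counters'
    columns get an auto-incrementing value per new row, 'append' columns accumulate
    values into a list instead of being overwritten, and rows missing any 'required'
    column are rejected.
    """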
def __init__(
self,
columns,
primary_key,
unique=None,
counters=None,
append=None,
required=None,
):
self.columns = columns
self._tab = {c: [] for c in self.columns}
self._primary_key = primary_key
self._last_update = {self: 0}
        self._unique = self._make_list(unique, default=None)
self._counters = self._make_list(counters, default=[])
self._append = self._make_list(append, default=[])
self._required = self._make_list(required, default=[])
def __getitem__(self, key):
return self._tab[key]
@staticmethod
def _make_list(elem, *, default):
if elem is None:
return default
elif isinstance(elem, list):
return elem
return [elem]
def add_row(self, row):
for req in self._required:
if row.get(req) is None:
return None
modified = False
unique_check = self._unique_check(row)
prim_key_arg = unique_check or row.get(self._primary_key)
if unique_check is None or prim_key_arg not in self._tab[self._primary_key]:
try:
for counter in self._counters:
row[counter] = len(self._tab[counter]) + 1
except TypeError:
pass
else:
for counter in self._counters:
index = self._tab[self._primary_key].index(prim_key_arg)
row[counter] = self._tab[counter][index]
# if no primary key is specified and the uniqueness check has not returned one then add the row
# with a new primary key id
if prim_key_arg is None or prim_key_arg not in self._tab[self._primary_key]:
modified = True
for c in self.columns:
if c == self._primary_key:
self._tab[c].append(prim_key_arg or next(WrapperID))
else:
self._tab[c].append(row.get(c))
# otherwise use existing primary key
else:
index = self._tab[self._primary_key].index(prim_key_arg)
for c in self.columns:
if c != self._primary_key:
row_value = row.get(c)
if self._tab[c][index] != row_value:
# if column c is marked for appending to then append to it:
# if current value is a list and the new row value is a list, set, or a tuple
# then combine them as sets and cast back to list for json serialisation,
# otherwise add the new value as an item to the set and cast to list
if c in self._append:
if row_value is not None:
if not isinstance(self._tab[c][index], list):
if self._tab[c][index] is None:
self._tab[c][index] = []
else:
self._tab[c][index] = [self._tab[c][index]]
curr_as_set = set(self._tab[c][index])
curr_orig = set(self._tab[c][index])
if isinstance(row_value, (list, set, tuple)):
curr_as_set |= set(row_value)
self._tab[c][index] = list(curr_as_set)
else:
curr_as_set.add(row_value)
self._tab[c][index] = list(curr_as_set)
if curr_orig.symmetric_difference(
set(self._tab[c][index])
):
modified = True
else:
modified = True
self._tab[c][index] = row_value
if modified:
if prim_key_arg is None:
return row.get(self._primary_key) or self._tab[self._primary_key][-1]
else:
return prim_key_arg
else:
return None
    # check whether the row being added duplicates values already present in the columns marked as unique
def _unique_check(self, in_values):
try:
unique_indices = []
for u in self._unique:
if in_values.get(u) in self._tab[u]:
indices = [
i
for i, element in enumerate(self._tab[u])
if element == in_values.get(u)
]
                    unique_indices.append(indices)
else:
break
else:
for i1, ui1 in enumerate(unique_indices):
for i2, ui2 in enumerate(unique_indices):
if i1 != i2:
if set(ui1).isdisjoint(ui2):
return None
overlap_list = unique_indices[0]
for i in range(len(unique_indices) - 1):
curr_overlap = set(unique_indices[i]).intersection(
unique_indices[i + 1]
)
overlap_list = list(set(overlap_list).intersection(curr_overlap))
if not overlap_list:
return None
return self._tab[self._primary_key][overlap_list[0]]
return None
except TypeError:
return None
def get_row_index(self, key, value):
if value is None:
return None
indices = [i for i, element in enumerate(self._tab[key]) if element == value]
if indices:
if len(indices) == 1:
return indices[0]
else:
return indices
return None
def get_row_by_primary_key(self, value):
row_index = self.get_row_index(self._primary_key, value)
return {c: self._tab[c][row_index] for c in self.columns}
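# A minimal usage sketch (illustrative only; the column names are made up):
# a table keyed on "id", unique on "name", with an appendable "tags" column.
#
#   t = Table(["id", "name", "tags"], "id", unique="name", append="tags")
#   pk = t.add_row({"name": "mic_001", "tags": "a"})  # new row, id from WrapperID
#   t.add_row({"name": "mic_001", "tags": "b"})       # same row; tags -> ["a", "b"] (order from a set)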
def to_snake_case(camel_case):
return re.sub(r"(?<!^)(?=[A-Z])", "_", camel_case).lower()
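# For example: to_snake_case("MotionCorrection") -> "motion_correction"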
def parse_sqlalchemy_table(sa_table):
columns, primary = [], None
for column in sa_table.__table__.columns:
columns.append(to_snake_case(column.name))
if column.primary_key and not primary:
primary = columns[-1]
return columns, primary
class MotionCorrectionTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(sqlalchemy.MotionCorrection)
columns.append("created_time_stamp")
columns.append("drift_data")
# columns.append("job_string")
super().__init__(
columns,
prim_key,
unique="micrograph_full_path",
counters="image_number",
required="micrograph_full_path",
)
class CTFTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(sqlalchemy.CTF)
super().__init__(
columns,
prim_key,
unique="motion_correction_id",
required="motion_correction_id",
)
class ParticlePickerTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(sqlalchemy.ParticlePicker)
columns.extend(
[
"micrograph_full_path",
"mc_image_full_path",
"particle_coordinates",
"job_string",
]
)
super().__init__(
columns,
prim_key,
unique=["micrograph_full_path", "job_string"],
required="first_motion_correction_id",
)
class ParticleClassificationGroupTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(
sqlalchemy.ParticleClassificationGroup
)
columns.append("job_string")
columns.append("class_images_stack")
columns.append("class_images_modification_time")
super().__init__(
columns,
prim_key,
unique="job_string",
required=["job_string", "particle_picker_id"],
)
class ParticleClassificationTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(sqlalchemy.ParticleClassification)
columns.append("job_string")
super().__init__(
columns,
prim_key,
unique=["job_string", "class_number"],
required=["class_number", "particle_classification_group_id"],
)
class CryoemInitialModelTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(sqlalchemy.CryoemInitialModel)
columns.append("ini_model_job_string")
columns.append("particle_classification_id")
super().__init__(
columns,
prim_key,
unique="ini_model_job_string",
append="particle_classification_id",
required="particle_classification_id",
)
class RelativeIceThicknessTable(Table):
def __init__(self):
columns, prim_key = parse_sqlalchemy_table(sqlalchemy.RelativeIceThickness)
super().__init__(
columns,
prim_key,
unique="motion_correction_id",
required="motion_correction_id",
)
@functools.singledispatch
def insert(primary_table, end_time, source, relion_options, **kwargs):
    raise ValueError(f"{primary_table!r} is not a known Table")
@insert.register(MotionCorrectionTable)
def _(
primary_table: MotionCorrectionTable,
end_time,
source,
relion_options,
row,
):
row.update(
{
"dose_per_frame": relion_options.motioncor_doseperframe,
"patches_used_x": relion_options.motioncor_patches_x,
"patches_used_y": relion_options.motioncor_patches_y,
}
)
pid = primary_table.add_row(row)
return pid
@insert.register(CTFTable)
def _(
primary_table: CTFTable,
end_time,
source,
relion_options,
row,
):
row.update(
{
"box_size_x": relion_options.ctffind_boxsize,
"box_size_y": relion_options.ctffind_boxsize,
"min_resolution": relion_options.ctffind_minres,
"max_resolution": relion_options.ctffind_maxres,
"min_defocus": relion_options.ctffind_defocus_min,
"max_defocus": relion_options.ctffind_defocus_max,
"defocus_step_size": relion_options.ctffind_defocus_step,
}
)
pid = primary_table.add_row(row)
return pid
@insert.register(ParticlePickerTable)
def _(
primary_table: ParticlePickerTable,
end_time,
source,
relion_options,
row,
):
row.update(
{
"particle_picking_template": relion_options.cryolo_gmodel,
"particle_diameter": int(
relion_options.extract_boxsize
* relion_options.angpix
/ relion_options.motioncor_binning
)
/ 10,
}
)
pid = primary_table.add_row(row)
return pid
@insert.register(ParticleClassificationGroupTable)
def _(
primary_table: ParticleClassificationGroupTable,
end_time,
source,
relion_options,
row,
):
if row.get("type") == "2D":
ncpb = relion_options.class2d_nr_classes
elif row.get("type") == "3D":
ncpb = relion_options.class3d_nr_classes
else:
ncpb = 0
row.update(
{
"number_of_particles_per_batch": relion_options.batch_size,
"number_of_classes_per_batch": ncpb,
"symmetry": relion_options.symmetry,
}
)
pid = primary_table.add_row(row)
return pid
@insert.register(ParticleClassificationTable)
def _(
primary_table: ParticleClassificationTable,
end_time,
source,
relion_options,
row,
):
pid = primary_table.add_row(row)
return pid
@insert.register(CryoemInitialModelTable)
def _(
primary_table: CryoemInitialModelTable,
end_time,
source,
relion_options,
row,
):
row["number_of_particles"] = row["init_model_number_of_particles"][
row["init_model_class_num"]
]
row.update({"resolution": relion_options.inimodel_resol_final})
pid = primary_table.add_row(row)
return pid
@insert.register(RelativeIceThicknessTable)
def _(
primary_table: RelativeIceThicknessTable,
end_time,
source,
relion_options,
row,
):
pid = primary_table.add_row(row)
return pid
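# A minimal dispatch sketch (illustrative; `opts` stands in for the real
# relion options object, and only the attributes read above are assumed):
#
#   table = CTFTable()
#   pid = insert(table, end_time=None, source=None, relion_options=opts,
#                row={"motion_correction_id": 1})
#   # functools.singledispatch routes the call to the CTFTable handler above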
|
#!/usr/bin/python3
# INSTRUCTIONS
# 1. Install Python3 for Windows
# https://www.python.org/downloads/release/python-380/
# 2. Download the webdriver for your Chrome version: Help > About Chrome
# https://sites.google.com/a/chromium.org/chromedriver/downloads
# 3. Unzip and place the chromedriver in the same folder as the script
# 4. Install Selenium "pip install selenium"
# 5. Update input.txt with your HPE credentials
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys # Module to send keystrokes
from selenium.webdriver.support.ui import WebDriverWait # Module to proceed when page loaded
from selenium.webdriver.support import expected_conditions as EC # Used by the WebDriverWait module
from selenium.webdriver.common.by import By # Used by the WebDriverWait module
# Read input.txt to get HPE credentials
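# Expected input.txt layout (inferred from the three reads below):
#   line 1: HPE username
#   line 2: HPE password
#   line 3: SAID (support agreement ID)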
f = open("input.txt", "r")
user = f.readline().strip()
password = f.readline().strip()
said = f.readline().strip()
f.close()
# Creating a webdriver instance
driver = webdriver.Chrome()
# Maximize window
driver.maximize_window()
driver.get('https://support.hpe.com/portal/site/hpsc/scm/home')
# Wait for page to load (blocking)
#time.sleep(2)
# Wait for page to load max 2s or until id "username" is located
WebDriverWait(driver,2).until(EC.presence_of_element_located((By.ID,"username")))
# Login
#===================================
driver.find_element_by_id('username').send_keys(user)
driver.find_element_by_id('password').send_keys(password)
driver.find_element_by_id('signIn').click()
# Print all ids on page, used for developing
#ids = driver.find_elements_by_xpath('//*[@id]')
#for i in ids:
# # print i.tag_name
# print(i.get_attribute('id'))
# Wait for page to load max 10s or until tag "iframe" is located
#WebDriverWait(driver,10).until(EC.presence_of_element_located((By.TAG_NAME,"iframe")))
time.sleep(10)
# Find the dynamic generated iframe id
iframeid = driver.find_element_by_tag_name('iframe')
# Need to switch to active iframe to find expected elements
driver.switch_to.frame(iframeid.get_attribute('id'))
# Provide SAID
driver.find_element_by_id('ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_txtSubmitCase').send_keys(said)
driver.find_element_by_id('ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_btnSubmitCase').click()
#time.sleep(15)
# Wait for page to load max 15s or until tag "iframe" is located
WebDriverWait(driver,15).until(EC.presence_of_element_located((By.TAG_NAME,"iframe")))
# Fill in default case details entries
#=====================================
# Find the dynamic generated iframe id
iframeid = driver.find_element_by_tag_name('iframe')
# Need to switch to active iframe to find expected elements
driver.switch_to.frame(iframeid.get_attribute('id'))
# Click email radio button
driver.find_element_by_xpath('//*[@id="ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_preferredContactUpdPanel"]/table/tbody/tr[2]/td[1]/a').click()
# Add time delay to avoid select/input issues
time.sleep(0.5)
# Select HPE Support Engineer radio button
driver.find_element_by_xpath('//*[@id="ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_preferredServiceUpdPanel"]/table/tbody/tr[2]/td[1]/a').click()
time.sleep(0.5)
# Click Severity drop down
driver.find_element_by_xpath('//*[@id="ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_ddlSeverity_title"]/tr/td[2]/span').click()
time.sleep(0.5)
# Select 3-Normal
driver.find_element_by_xpath('//*[@id="ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_ddlSeverity_child"]/ul/li[2]/span').click()
time.sleep(0.5)
# Operating system/version
driver.find_element_by_id('environmentsList').send_keys("Red Hat Enterprise Linux")
time.sleep(0.5)
# Contact hours/time zone
driver.find_element_by_id('ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_txtContactHoursORTimezone').send_keys('8am-5pm ET/CT')
time.sleep(0.5)
# Site access details
driver.find_element_by_id('ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_txtGSDSiteAccess').send_keys("Contact ticket issuer for details")
time.sleep(0.5)
# Problem description template
driver.find_element_by_id('ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_txtAreaCDProbDesc').send_keys('Description:\n\nActual Behavior:\n\nExpected Behavior:\n\nSteps to Reproduce:\n')
time.sleep(0.5)
# Troubleshooting steps template
driver.find_element_by_id('ctl00_ctl00_ContentPlaceHolder1_ContentPlaceHolder3Col_txtAreaCDSteps').send_keys('Troubleshooting Steps Attempted:\n\nWorkaround:\n')
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1ReplicaSet(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ReplicaSetSpec',
'status': 'V1ReplicaSetStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1ReplicaSet - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1ReplicaSet. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ReplicaSet. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ReplicaSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ReplicaSet. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ReplicaSet. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ReplicaSet. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ReplicaSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ReplicaSet. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ReplicaSet. # noqa: E501
:return: The metadata of this V1ReplicaSet. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ReplicaSet.
:param metadata: The metadata of this V1ReplicaSet. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1ReplicaSet. # noqa: E501
:return: The spec of this V1ReplicaSet. # noqa: E501
:rtype: V1ReplicaSetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1ReplicaSet.
:param spec: The spec of this V1ReplicaSet. # noqa: E501
:type: V1ReplicaSetSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1ReplicaSet. # noqa: E501
:return: The status of this V1ReplicaSet. # noqa: E501
:rtype: V1ReplicaSetStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ReplicaSet.
:param status: The status of this V1ReplicaSet. # noqa: E501
:type: V1ReplicaSetStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ReplicaSet):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ReplicaSet):
return True
return self.to_dict() != other.to_dict()
|
# commented out to prevent build from breaking
# this test is known to be broken, see comments in test
#from .test_form_api import *
|
import pandas as pd
sales=pd.read_csv('train_data.csv',sep=r'\s*,\s*',engine='python') # read the CSV
X=sales['X'].values # first column of the CSV
Y=sales['Y'].values # second column of the CSV
# initialize the accumulators
s1 = 0
s2 = 0
s3 = 0
s4 = 0
n = 4 #### adjust this to match the number of data points
# accumulate the sums in a loop
for i in range(n):
    s1 = s1 + X[i]*Y[i]
    s2 = s2 + X[i]
    s3 = s3 + Y[i]
    s4 = s4 + X[i]*X[i]
# compute the slope and intercept via the least-squares normal equations
b = (s2*s3-n*s1)/(s2*s2-s4*n)
a = (s3 - b*s2)/n
print("Coeff: {} Intercept: {}".format(b, a))
|
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Emerge hook to pre-parse and verify license information.
Called from src/scripts/hooks/install/gen-package-licenses.sh as part of a
package emerge.
"""
from __future__ import print_function
from chromite.lib import commandline
from chromite.licensing import licenses_lib
def main(args):
parser = commandline.ArgumentParser(usage=__doc__)
parser.add_argument('--builddir', type='path', dest='builddir',
help='Take $PORTAGE_BUILDDIR as argument.')
opts = parser.parse_args(args)
opts.Freeze()
licenses_lib.HookPackageProcess(opts.builddir)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.config import cfg
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import topics
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.nicira.common import config
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.dhcp_meta import nvp as nvp_svc
from neutron.plugins.nicira.dhcp_meta import rpc as nvp_rpc
LOG = logging.getLogger(__name__)
class DhcpMetadataAccess(object):
def setup_dhcpmeta_access(self):
"""Initialize support for DHCP and Metadata services."""
        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT:
            self._setup_rpc_dhcp_metadata()
            mod = nvp_rpc
        elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
            self._setup_nvp_dhcp_metadata()
            mod = nvp_svc
        else:
            # guard against an unbound 'mod' if an unknown mode slips through
            raise nvp_exc.NvpPluginException(
                err_msg=_("Invalid NSX agent_mode: %s") %
                cfg.CONF.NSX.agent_mode)
self.handle_network_dhcp_access_delegate = (
mod.handle_network_dhcp_access
)
self.handle_port_dhcp_access_delegate = (
mod.handle_port_dhcp_access
)
self.handle_port_metadata_access_delegate = (
mod.handle_port_metadata_access
)
self.handle_metadata_access_delegate = (
mod.handle_router_metadata_access
)
def _setup_rpc_dhcp_metadata(self):
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.dispatcher = nvp_rpc.NVPRpcCallbacks().create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
self.conn.consume_in_thread()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
def _setup_nvp_dhcp_metadata(self):
# In agentless mode the following extensions, and related
# operations, are not supported; so do not publish them
if "agent" in self.supported_extension_aliases:
self.supported_extension_aliases.remove("agent")
if "dhcp_agent_scheduler" in self.supported_extension_aliases:
self.supported_extension_aliases.remove(
"dhcp_agent_scheduler")
nvp_svc.register_dhcp_opts(cfg)
nvp_svc.register_metadata_opts(cfg)
self.lsn_manager = nvp_svc.LsnManager(self)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
nvp_svc.DhcpAgentNotifyAPI(self, self.lsn_manager))
        # In agentless mode, ports whose owner is DHCP need to
        # be special cased, so add DHCP to the list of special
        # port owners
if const.DEVICE_OWNER_DHCP not in self.port_special_owners:
self.port_special_owners.append(const.DEVICE_OWNER_DHCP)
try:
error = None
nvp_svc.check_services_requirements(self.cluster)
except nvp_exc.NvpInvalidVersion:
error = _("Unable to run Neutron with config option '%s', as NSX "
"does not support it") % config.AgentModes.AGENTLESS
except nvp_exc.ServiceClusterUnavailable:
error = _("Unmet dependency for config option "
"'%s'") % config.AgentModes.AGENTLESS
if error:
LOG.exception(error)
raise nvp_exc.NvpPluginException(err_msg=error)
def handle_network_dhcp_access(self, context, network, action):
self.handle_network_dhcp_access_delegate(self, context,
network, action)
def handle_port_dhcp_access(self, context, port_data, action):
self.handle_port_dhcp_access_delegate(self, context, port_data, action)
def handle_port_metadata_access(self, context, port, is_delete=False):
self.handle_port_metadata_access_delegate(self, context,
port, is_delete)
def handle_router_metadata_access(self, context,
router_id, interface=None):
self.handle_metadata_access_delegate(self, context,
router_id, interface)
|
import threading
import time
import unittest
from urllib.request import urlopen
import requests
import pytest
from vcr_stub_server.stub_server_handler import BuildHandlerClassWithCassette
from vcr_stub_server.cassettes.vcrpy_cassette import VcrpyCassette
from http.server import HTTPServer
@pytest.fixture(scope="module")
def stub_server():
host = "localhost"
port = 8282
vcr_cassette = VcrpyCassette(
cassette_path="tests/fixtures/json_placeholder_crud.yaml"
)
handler_class = BuildHandlerClassWithCassette(vcr_cassette=vcr_cassette)
http_server = HTTPServer((host, port), handler_class)
server_thread = threading.Thread(target=http_server.serve_forever)
server_thread.start()
    yield http_server
http_server.shutdown()
server_thread.join()
def test_get_request(stub_server):
response = requests.get("http://localhost:8282/posts")
assert response.status_code == 200
def test_post_request(stub_server):
response = requests.post(
"http://localhost:8282/posts", json={"title": "foo", "body": "bar", "userId": 1}
)
assert response.status_code == 201
def test_patch_request(stub_server):
response = requests.patch("http://localhost:8282/posts/1", json={"body": "baz"})
assert response.status_code == 200
def test_put_request(stub_server):
response = requests.put(
"http://localhost:8282/posts/1",
json={"title": "foo", "body": "baz", "userId": 1},
)
assert response.status_code == 200
def test_delete_request(stub_server):
response = requests.delete("http://localhost:8282/posts/1")
assert response.status_code == 200
def test_response_not_found(stub_server):
response = requests.get("http://localhost:8282/unrecorded_request")
assert response.status_code == 500
|
"""
Codes are from:
https://github.com/jaxony/unet-pytorch/blob/master/model.py
"""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import init
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=padding,
bias=bias,
groups=groups,
)
def upconv2x2(in_channels, out_channels, mode="transpose"):
if mode == "transpose":
return nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
else:
        # nn.Upsample keeps the channel count unchanged, so a
        # 1x1 conv follows to map in_channels to out_channels
return nn.Sequential(
nn.Upsample(mode="bilinear", scale_factor=2),
conv1x1(in_channels, out_channels),
)
def conv1x1(in_channels, out_channels, groups=1):
return nn.Conv2d(in_channels, out_channels, kernel_size=1, groups=groups, stride=1)
class DownConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels, pooling=True):
super(DownConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.conv1 = conv3x3(self.in_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
if self.pooling:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
before_pool = x
if self.pooling:
x = self.pool(x)
return x, before_pool
class UpConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 UpConvolution.
A ReLU activation follows each convolution.
"""
def __init__(
self,
in_channels,
out_channels,
merge_mode="concat",
up_mode="transpose",
):
super(UpConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.merge_mode = merge_mode
self.up_mode = up_mode
self.upconv = upconv2x2(self.in_channels, self.out_channels, mode=self.up_mode)
if self.merge_mode == "concat":
self.conv1 = conv3x3(2 * self.out_channels, self.out_channels)
else:
# num of input channels to conv2 is same
self.conv1 = conv3x3(self.out_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
def forward(self, from_down, from_up):
"""Forward pass
Arguments:
from_down: tensor from the encoder pathway
from_up: upconv'd tensor from the decoder pathway
"""
from_up = self.upconv(from_up)
if self.merge_mode == "concat":
x = torch.cat((from_up, from_down), 1)
else:
x = from_up + from_down
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
return x
class UNet(nn.Module):
"""`UNet` class is based on https://arxiv.org/abs/1505.04597
The U-Net is a convolutional encoder-decoder neural network.
Contextual spatial information (from the decoding,
expansive pathway) about an input tensor is merged with
information representing the localization of details
(from the encoding, compressive pathway).
Modifications to the original paper:
(1) padding is used in 3x3 convolutions to prevent loss
of border pixels
(2) merging outputs does not require cropping due to (1)
(3) residual connections can be used by specifying
UNet(merge_mode='add')
(4) if non-parametric upsampling is used in the decoder
pathway (specified by upmode='upsample'), then an
additional 1x1 2d convolution occurs after upsampling
to reduce channel dimensionality by a factor of 2.
This channel halving happens with the convolution in
    the transpose convolution (specified by upmode='transpose')
"""
def __init__(
self,
num_classes,
in_channels=3,
depth=5,
start_filts=64,
up_mode="transpose",
merge_mode="concat",
**kwargs
):
"""
Arguments:
in_channels: int, number of channels in the input tensor.
Default is 3 for RGB images.
depth: int, number of MaxPools in the U-Net.
start_filts: int, number of convolutional filters for the
first conv.
up_mode: string, type of upconvolution. Choices: 'transpose'
for transpose convolution or 'upsample' for nearest neighbour
upsampling.
"""
super(UNet, self).__init__()
if up_mode in ("transpose", "upsample"):
self.up_mode = up_mode
else:
raise ValueError(
'"{}" is not a valid mode for '
'upsampling. Only "transpose" and '
'"upsample" are allowed.'.format(up_mode)
)
if merge_mode in ("concat", "add"):
self.merge_mode = merge_mode
else:
            raise ValueError(
                '"{}" is not a valid mode for '
                "merging up and down paths. "
                'Only "concat" and '
                '"add" are allowed.'.format(merge_mode)
            )
# NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
if self.up_mode == "upsample" and self.merge_mode == "add":
raise ValueError(
'up_mode "upsample" is incompatible '
'with merge_mode "add" at the moment '
"because it doesn't make sense to use "
"nearest neighbour to reduce "
"depth channels (by half)."
)
self.num_classes = num_classes
self.in_channels = in_channels
self.start_filts = start_filts
self.depth = depth
self.down_convs = []
self.up_convs = []
# create the encoder pathway and add to a list
for i in range(depth):
ins = self.in_channels if i == 0 else outs
outs = self.start_filts * (2 ** i)
pooling = True if i < depth - 1 else False
down_conv = DownConv(ins, outs, pooling=pooling)
self.down_convs.append(down_conv)
# create the decoder pathway and add to a list
# - careful! decoding only requires depth-1 blocks
for i in range(depth - 1):
ins = outs
outs = ins // 2
up_conv = UpConv(ins, outs, up_mode=up_mode, merge_mode=merge_mode)
self.up_convs.append(up_conv)
# add the list of modules to current module
self.down_convs = nn.ModuleList(self.down_convs)
self.up_convs = nn.ModuleList(self.up_convs)
self.conv_final = conv1x1(outs, self.num_classes)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight)
init.constant_(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
encoder_outs = []
# encoder pathway, save outputs for merging
for i, module in enumerate(self.down_convs):
x, before_pool = module(x)
encoder_outs.append(before_pool)
for i, module in enumerate(self.up_convs):
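            # encoder_outs[-1] is the bottleneck output that x came from, so
            # the i-th up block merges with encoder_outs[-(i + 2)], the
            # matching encoder feature map counted from the end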
before_pool = encoder_outs[-(i + 2)]
x = module(before_pool, x)
        # No softmax is used, so you need to use nn.CrossEntropyLoss
        # in your training script, as that loss applies the softmax internally.
x = self.conv_final(x)
return x
if __name__ == "__main__":
"""
testing
"""
model = UNet(1, depth=5, merge_mode="concat", in_channels=1, start_filts=32)
print(model)
print(sum(p.numel() for p in model.parameters()))
reso = 176
x = np.zeros((1, 1, reso, reso))
x[:, :, int(reso / 2 - 1), int(reso / 2 - 1)] = np.nan
x = torch.FloatTensor(x)
out = model(x)
print("%f" % (torch.sum(torch.isnan(out)).detach().cpu().numpy() / (reso * reso)))
# loss = torch.sum(out)
# loss.backward()
|
from livesettings import *
from django.utils.translation import ugettext_lazy as _
gettext = lambda s:s
_strings = (gettext('CreditCard'), gettext('Credit Card'))
PAYMENT_GROUP = ConfigurationGroup('PAYMENT_TRUSTCOMMERCE',
_('TrustCommerce Payment Settings'),
ordering=102)
config_register_list(
StringValue(PAYMENT_GROUP,
'KEY',
description=_("Module key"),
hidden=True,
default = 'TRUSTCOMMERCE'),
ModuleValue(PAYMENT_GROUP,
'MODULE',
description=_('Implementation module'),
hidden=True,
default = 'payment.modules.trustcommerce'),
BooleanValue(PAYMENT_GROUP,
'AVS',
description=_("Use Address Verification System (AVS)?"),
default=False),
BooleanValue(PAYMENT_GROUP,
'LIVE',
description=_("Accept real payments"),
help_text=_("False if you want to be in test mode"),
default=False),
StringValue(PAYMENT_GROUP,
'AUTH_TYPE',
description=_("Type of authorization to perform."),
help_text = _("Refer to manual for details on the different types."),
default = 'sale',
choices = [('sale', _('Sale')),
('preauth', _('Preauth'))]
),
StringValue(PAYMENT_GROUP,
'LABEL',
description=_('English name for this group on the checkout screens'),
default = 'Credit Cards',
help_text = _('This will be passed to the translation utility')),
StringValue(PAYMENT_GROUP,
'URL_BASE',
description=_('The url base used for constructing urlpatterns which will use this module'),
default = r'^credit/'),
MultipleStringValue(PAYMENT_GROUP,
'CREDITCHOICES',
description=_('Available credit cards'),
                choices = (
                    ('American Express', 'American Express'),
                    ('Visa', 'Visa'),
                    ('Mastercard', 'Mastercard'),
                    ('Discover', 'Discover')),
default = ('Visa', 'Mastercard', 'Discover')),
StringValue(PAYMENT_GROUP,
'LOGIN',
description=_('Your TrustCommerce login'),
default=""),
StringValue(PAYMENT_GROUP,
'PASSWORD',
description=_('Your TrustCommerce password'),
default=""),
BooleanValue(PAYMENT_GROUP,
'EXTRA_LOGGING',
description=_("Verbose logs"),
help_text=_("Add extensive logs during post."),
default=False)
)
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/farm/shared_tatooine_flora_large.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
# -*- coding: utf-8 -*-
"""Tests for all choice generators."""
from fauxfactory import gen_choice
import string
import unittest
class TestChoices(unittest.TestCase):
"""Test choices generator."""
def test_gen_choice_1(self):
"""
@Test: Select a random value from integer values
@Feature: Choice Generator
@Assert: Selects a random choice from options
"""
choices = range(5)
for turn in range(10):
result = gen_choice(choices)
self.assertIn(
result,
choices,
"An invalid value was selected from available choices.")
def test_gen_choice_2(self):
"""
@Test: Select a random value from alphanumeric values
@Feature: Choice Generator
@Assert: Selects a random choice from alphanumeric options
"""
choices = string.ascii_letters + string.digits
for turn in range(10):
result = gen_choice(choices)
self.assertIn(
result,
choices,
"An invalid value was selected from available choices.")
def test_gen_choice_3(self):
"""
@Test: Select a random value from short list
@Feature: Choice Generator
@Assert: Selects a random choice from short list
"""
choices = [1, ]
for turn in range(10):
result = gen_choice(choices)
self.assertEqual(
result,
choices[0],
"An invalid value was selected from available choices.")
def test_gen_choice_4(self):
"""
@Test: Select a random value from longer list
@Feature: Choice Generator
@Assert: Selects a random choice from longer list
"""
choices = [1, 2, 3, 9, 10, 11, 100, 101, 102]
for turn in range(10):
result = gen_choice(choices)
self.assertIn(
result,
choices,
"An invalid value was selected from available choices.")
def test_gen_choice_5(self):
"""
@Test: Select a random value from short tuple
@Feature: Choice Generator
@Assert: Selects a random choice from short tuple
"""
choices = (1, )
for turn in range(10):
result = gen_choice(choices)
self.assertEqual(
result,
choices[0],
"An invalid value was selected from available choices.")
def test_gen_choice_6(self):
"""
@Test: Select a random value from longer tuple
@Feature: Choice Generator
@Assert: Selects a random choice from longer tuple
"""
choices = (1, 2, 3, 9, 10, 11, 100, 101, 102, )
for turn in range(10):
result = gen_choice(choices)
self.assertIn(
result,
choices,
"An invalid value was selected from available choices.")
def test_gen_choice_7(self):
"""
@Test: Select a random value from empty list
@Feature: Choice Generator
@Assert: No choice from empty list
"""
choices = []
with self.assertRaises(ValueError):
gen_choice(choices)
def test_gen_choice_8(self):
"""
@Test: Select a random value from empty tuple
@Feature: Choice Generator
@Assert: No choice from empty tuple
"""
choices = ()
with self.assertRaises(ValueError):
gen_choice(choices)
def test_gen_choice_9(self):
"""
@Test: Select a random value from empty dictionary
@Feature: Choice Generator
@Assert: No choice from empty dictionary
"""
choices = {}
with self.assertRaises(ValueError):
gen_choice(choices)
def test_gen_choice_10(self):
"""
@Test: Select a random value from single dictionary
@Feature: Choice Generator
@Assert: No choice from single dictionary
"""
choices = {'Name': 'Bob', 'Age': 39}
with self.assertRaises(ValueError):
gen_choice(choices)
def test_gen_choice_11(self):
"""
@Test: Select a random value from dictionary list
@Feature: Choice Generator
@Assert: Selects a value from a list of dictionaries
"""
choices = [
{'Name': 'Bob', 'Age': 39},
{'Name': 'Alice', 'Age': 23},
{'Name': 'Pete', 'Age': 79},
]
for turn in range(10):
result = gen_choice(choices)
self.assertIn(
result,
choices,
"An invalid value was selected from available choices.")
def test_gen_choice_12(self):
"""
@Test: Select a random value from words list
@Feature: Choice Generator
@Assert: Selects a random choice from list
"""
        choices = ['green', 'yellow', 'blue', 'white']
for turn in range(10):
result = gen_choice(choices)
self.assertIn(
result,
choices,
"An invalid value was selected from available choices.")
def test_gen_choice_13(self):
"""
@Test: Cannot use None for Choice generator
@Feature: Choice Generator
@Assert: ValueError is raised
"""
choices = None
with self.assertRaises(ValueError):
gen_choice(choices)
|
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Ashish Arora)
# Apache 2.0
# It contains utility functions for scoring. These functions are called from score.py
from shapely.geometry.polygon import Polygon
import numpy as np
from PIL import Image
def _evaluate_mask_image(mask_ref_arr, mask_hyp_arr, iou_threshold):
"""Given reference mask and hypothesis mask, it returns precision
    and recall scores. All pixels of a given object must share the same value in the mask.
input
-----
mask_ref_arr (np array): ref array, contains same value for each line MAR
mask_hyp_arr (np array): hyp array, contains same value for each line MAR
iou_threshold (float): should be between 0 and 1, it decides if a match is
a good match or not.
Returns
-------
a dict that contains:
precision: (hypothesis matched)/(total hypothesis)
               will satisfy 0 <= precision <= 1
recall: (hypothesis matched)/(total reference)
            will satisfy 0 <= recall <= 1
pairs: list of matching hypothesis and reference pairs
"""
    # replace 0 with an unused value between 0 and 255, assuming the number
    # of objects is less than 255; the value 0 is reserved for creating a
    # temporary blank image
mask_val_ref = np.unique(mask_ref_arr)
mask_val_hyp = np.unique(mask_hyp_arr)
for val in range(0,255):
if val not in mask_val_ref:
mask_ref_arr[mask_ref_arr == 0] = val
break
for val in range(0,255):
if val not in mask_val_hyp:
mask_hyp_arr[mask_hyp_arr == 0] = val
break
mask_val_ref = np.unique(mask_ref_arr)
mask_val_hyp = np.unique(mask_hyp_arr)
num_ref = len(mask_val_ref)
num_hyp = len(mask_val_hyp)
iou_score = np.zeros([num_ref, num_hyp])
# compute intersection over union value
# for each ref and hyp object pair. Store
# the value in iou_score matrix
# iou_score[ref_index, hyp_index] is the iou
# of reference object (ref_index) and hypothesis object (hyp_index).
for ref_index in range(num_ref):
for hyp_index in range(num_hyp):
val_ref = mask_val_ref[ref_index]
val_hyp = mask_val_hyp[hyp_index]
            # for a given object, create a boolean image with object pixels as true and
            # background pixels as false
temp_img_ref = Image.new('L', (mask_ref_arr.shape[1], mask_ref_arr.shape[0]), 0)
pixels = np.where(mask_ref_arr == val_ref,
mask_ref_arr, temp_img_ref)
ref_bool_arr = np.array(pixels, dtype=bool)
temp_img_hyp = Image.new('L', (mask_hyp_arr.shape[1], mask_hyp_arr.shape[0]), 0)
pixels = np.where(mask_hyp_arr == val_hyp,
mask_hyp_arr, temp_img_hyp)
hyp_bool_arr = np.array(pixels, dtype=bool)
# calculate intersection and union values
intersection = hyp_bool_arr * ref_bool_arr
union = hyp_bool_arr + ref_bool_arr
iou_score[ref_index, hyp_index] = intersection.sum() / float(union.sum())
# get stats for a given iou threshold value
score = get_stats(iou_score, iou_threshold)
return score
def _evaluate_text_file(ref_rect_list, hyp_rect_list, iou_threshold):
"""Given reference rectangle list and hypothesis rectangle list, it
returns precision and recall score. It requires reference and hypothesis
rectangle list to contain rectangle in each line. A rectangle is described
by 8 values (h1,w1,h2,w2,h3,w3,h4,w4)
input
-----
ref_rect_list [[int]]: contains a list of list, contains a list of rectangle
and a rectangle is a list containing 8 integer values.
hyp_rect_list [[int]]: contains a list of list, contains a list of rectangle
and a rectangle is a list containing 8 integer values.
iou_threshold (float): should be between 0 and 1, it decides if a match is
a good match or not.
Returns
-------
a dict that contains:
precision: (hypothesis matched)/(total hypothesis)
               will satisfy 0 <= precision <= 1
recall: (hypothesis matched)/(total reference)
            will satisfy 0 <= recall <= 1
pairs: list of matching hypothesis and reference pairs
"""
# get all polygons present in the file
ref_polygons = _get_polygons(ref_rect_list)
hyp_polygons = _get_polygons(hyp_rect_list)
num_ref = len(ref_polygons)
num_hyp = len(hyp_polygons)
# compute intersection over union value
# for each ref and hyp object pair. Store
# the value in iou_score matrix
# iou_score[ref_index, hyp_index] is the iou
# of reference object (ref_index) and hypothesis object (hyp_index).
iou_score = np.zeros([num_ref, num_hyp])
for ref_index in range(num_ref):
for hyp_index in range(num_hyp):
polygon_ref = ref_polygons[ref_index]
polygon_hyp = hyp_polygons[hyp_index]
iou_score[ref_index, hyp_index] = _get_intersection_over_union(polygon_hyp, polygon_ref)
# get stats for a given iou threshold value
score = get_stats(iou_score, iou_threshold)
return score
def get_mar_transcription_mapping(ref_rect_transcription_list, hyp_rect):
"""Given reference rectangle list and transcriptions and hypothesis
rectangle, it returns mapping between hypothesis rectangle
and reference transcription. It requires reference
rectangle list to contain rectangle in each line. A rectangle is described
by 8 values (h1,w1,h2,w2,h3,w3,h4,w4)
input
-----
ref_rect_list [([int], str)]: contains a list of tuple, contains a list of rectangle
and text and a rectangle is a list containing 8 integer values.
hyp_rect_list [int]: contains a list of list, contains a list of rectangle
and a rectangle is a list containing 8 integer values.
Returns
-------
a dict that contains:
"""
# get all polygons present in the file
polygon_hyp = _get_rect_in_shapely_format(hyp_rect)
ref_polygons = _get_rect_list_in_shapely_format(ref_rect_transcription_list)
num_ref = len(ref_polygons)
    # compute the intersection over union value of each ref rectangle
    # against the single hyp rectangle; iou_score[ref_index] is the iou
    # of reference object (ref_index) and the hypothesis rectangle.
iou_score = [0] * num_ref
for ref_index in range(num_ref):
polygon_ref = ref_polygons[ref_index]
iou_score[ref_index] = _get_intersection_over_union(polygon_hyp, polygon_ref)
max_index = np.argmax(iou_score)
ref_rect_transcription = ref_rect_transcription_list[max_index]
return ref_rect_transcription, max_index
def get_stats(iou_score, iou_threshold):
"""
Given iou score for each ref hyp pair, it returns the precision
and recall score.
input
-----
iou_score [num_ref, num_hyp]: iou score between ref and hyp pair,
    all values will satisfy 0 <= iou_score[index_ref, index_hyp] <= 1
iou_threshold (float): should be between 0 and 1, it decides if a match is
a good match or not.
return
-----
a dict that contains:
precision: (hypothesis matched)/(total hypothesis).
               will satisfy 0 <= precision <= 1.
recall: (hypothesis matched)/(total reference)
            will satisfy 0 <= recall <= 1
pairs: list of matching hypothesis and reference pairs
"""
hyp_matched = 0
pairs = []
num_ref = iou_score.shape[0]
num_hyp = iou_score.shape[1]
if_ref_object_matched = np.zeros(num_ref, np.int8)
if_hyp_object_matched = np.zeros(num_hyp, np.int8)
for ref_index in range(num_ref):
for hyp_index in range(num_hyp):
if if_ref_object_matched[ref_index] == 0 and if_hyp_object_matched[hyp_index] == 0:
if iou_score[ref_index, hyp_index] > iou_threshold:
if_ref_object_matched[ref_index] = 1
if_hyp_object_matched[hyp_index] = 1
hyp_matched += 1
pairs.append({'reference_data': ref_index, 'det': hyp_index})
# compute precision and recall value
if num_ref == 0:
recall = float(1)
precision = float(0) if num_hyp > 0 else float(1)
else:
recall = float(hyp_matched) / num_ref
precision = 0 if num_hyp == 0 else float(hyp_matched) / num_hyp
score = dict()
score['precision'] = precision
score['recall'] = recall
score['pairs'] = pairs
return score
def _get_intersection_over_union(hyp_rect, ref_rect):
"""Given two rectangles (hyp and ref) in shapely format,
it returns the IOU value. IOU value is the ratio between
the area of the intersection of the two polygons divided
by the area of their union.
Returns
-------
iou_val: (float)
"""
try:
rect_intersection = hyp_rect & ref_rect
intersection_area = rect_intersection.area
union_area = hyp_rect.area + ref_rect.area - intersection_area
iou_val = float(intersection_area) / union_area
return iou_val
    except Exception:
        # invalid geometry (e.g. self-intersecting polygons) is treated as no overlap
        return 0
def _get_polygons(rect_list):
"""
    Given a rectangle list, it converts and returns the rectangles
    in shapely format. The shapely library is used to compute the intersection
    and union area of two rectangles.
    input
    -----
    rect_list [[int]]: a list of lists; each inner list is a rectangle of
    8 integer values. These values are
    (h1,w1,h2,w2,h3,w3,h4,w4)
    return
    ------
    rect_list: a list of rectangles in shapely format
"""
rect_shapely = []
for n in range(len(rect_list)):
points = rect_list[n]
rect = _get_rect_in_shapely_format(points)
rect_shapely.append(rect)
return rect_shapely
def _get_rect_list_in_shapely_format(rect_list):
"""
Given a rectangle list, it convert and returns the rectangle
in shapely format. Shapely library is used to calculate intersection
and union area of two rectangles.
input
-----
rect_list [[int]]: contains a list of list, contains a list of rectangle
and a rectangle is a list containing 8 integer values. These values are
(h1,w1,h2,w2,h3,w3,h4,w4)
return
------
rect_list: list of rectangle in shapely format
"""
rect_shapely = []
for n in range(len(rect_list)):
points = rect_list[n]
rect = _get_rect_in_shapely_format(points[0])
rect_shapely.append(rect)
return rect_shapely
def _get_rect_in_shapely_format(points):
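    # Pack the 8 flat values so that row 0 holds the h coordinates and row 1
    # holds the w coordinates; reshape(2, 4).T then yields the four (h, w)
    # corner points for the shapely Polygon.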
rect_coordinate = np.empty([1, 8], dtype='int32')
rect_coordinate[0, 0] = int(points[0])
rect_coordinate[0, 4] = int(points[1])
rect_coordinate[0, 1] = int(points[2])
rect_coordinate[0, 5] = int(points[3])
rect_coordinate[0, 2] = int(points[4])
rect_coordinate[0, 6] = int(points[5])
rect_coordinate[0, 3] = int(points[6])
rect_coordinate[0, 7] = int(points[7])
rect_coordinate_reshaped = rect_coordinate[0].reshape([2, 4]).T
rect = Polygon(rect_coordinate_reshaped)
return rect
def get_score(ref, hyp, iou_threshold, if_eval_text_file=True):
"""
input
-----
If if_eval_text_file == true, then
ref : [[int]]: contains a list of list, contains a list of rectangle
and a rectangle is a list containing 8 integer values.
hyp : [[int]]: contains a list of list, contains a list of rectangle
and a rectangle is a list containing 8 integer values.
else
ref : (np array): ref array, contains same value for each line MAR
hyp : (np array): hyp array, contains same value for each line MAR
iou_threshold (float): should be between 0 and 1, it decides if a match is
a good match or not.
if_eval_text_file: bool, checks if evaluation should be based on text file or
mask image
Returns
-------
a dict that contains:
precision: (hypothesis matched)/(total hypothesis)
               will satisfy 0 <= precision <= 1
recall: (hypothesis matched)/(total reference)
            will satisfy 0 <= recall <= 1
pairs: list of matching hypothesis and reference pairs
"""
if if_eval_text_file:
return _evaluate_text_file(ref, hyp, iou_threshold)
else:
return _evaluate_mask_image(ref, hyp, iou_threshold)
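# A minimal usage sketch (the rectangle is an illustrative 10x10 square given
# in the (h1,w1,h2,w2,h3,w3,h4,w4) corner order described above):
#   square = [0, 0, 0, 10, 10, 10, 10, 0]
#   score = get_score([square], [square], iou_threshold=0.5)
#   # -> precision 1.0, recall 1.0, pairs [{'reference_data': 0, 'det': 0}]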
|
class Message(object):
__slots__ = ('id', 'sender', 'recipients', 'created_at', 'body')
def __init__(self, id, sender, recipients, created_at, body):
self.id = id
self.sender = sender
self.recipients = recipients
self.created_at = created_at
self.body = body
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.id == other.id and
self.sender == other.sender and
self.recipients == other.recipients and
self.created_at == other.created_at and
self.body == other.body)
def __ne__(self, other):
return not self.__eq__(other)
|
#!/usr/bin/env python3
from os import path
import codecs
from setuptools import setup, find_packages
import keyper
def run_setup():
"""Run package setup."""
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
try:
with codecs.open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
    except (IOError, OSError):
        # This happens when running tests without a README.md present
        long_description = None
setup(
name='keyper',
version=keyper.__version__,
description='A utility for dealing with the macOS keychain.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Microsoft/keyper',
author='Dale Myers',
author_email='dalemy@microsoft.com',
license='MIT',
install_requires=[],
python_requires='>=3.7',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: MacOS X',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development',
'Topic :: Utilities'
],
keywords='apple, macOS, keychain, certificates, passwords',
packages=find_packages(exclude=['docs', 'tests'])
)
if __name__ == "__main__":
run_setup()
|
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
data = pd.read_csv(path)
#print(data.head())
#Code starts here
data_sample = data.sample(n=sample_size, random_state=0)
#print(sample_data.shape)
sample_mean = data_sample['installment'].mean()
print("sample_mean: ",sample_mean)
sample_std = data_sample['installment'].std()
margin_of_error = z_critical * (sample_std / math.sqrt(sample_size))
print(margin_of_error)
confidence_interval = ((sample_mean-margin_of_error),(sample_mean+margin_of_error))
print("confidence_interval: ",confidence_interval)
true_mean = data['installment'].mean()
print("true_mean: ",true_mean)
#print(confidence_of_interval[0])
if confidence_interval[0] <= true_mean <=confidence_interval[1]:
print('Yes')
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig, axes = plt.subplots(nrows=3, ncols=1)
for i in range(len(sample_size)):
m=[]
for j in range(1000):
        sample_data = data.sample(n=sample_size[i], random_state=j) # vary the seed so each draw differs
m.append(sample_data['installment'].mean())
mean_series = pd.Series(m)
#print(mean_series)
axes[i].plot(mean_series)
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
data['int.rate'] = data['int.rate'].astype(str).str[:-1].astype(float)
data['int.rate'] = data['int.rate']/100
print("mean: ",data['int.rate'].mean())
z_statistic, p_value =ztest(data[data['purpose']=='small_business']['int.rate'], value=data['int.rate'].mean(), alternative='larger')
print("z_statistic: ",z_statistic)
print("p_value: ",p_value)
if(p_value<0.05):
print("Reject Null hypothesis")
else:
print("Accept Null hypothesis")
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
z_statistic, p_value = ztest(x1 = data[data['paid.back.loan']=='No']['installment'], x2 = data[data['paid.back.loan']=='Yes']['installment'])
print("z_statistic: ",z_statistic)
print("p_value: ",p_value)
if(p_value < 0.05):
print("Rejected Null hypothesis" )
else:
print("Accepted Null hypothesis" )
# --------------
#Importing header files
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*
df = 6) # Df = number of variable categories(in purpose) - 1
#Code starts here
yes = data[data['paid.back.loan']=='Yes']['purpose'].value_counts()
print('yes: \n',yes)
no = data[data['paid.back.loan']=='No']['purpose'].value_counts()
print('no: \n',no)
observed = pd.concat([yes.transpose(),no.transpose()], axis=1, keys=['Yes','No'])
print("observed: \n",observed)
chi2, p, dof, ex = chi2_contingency(observed)
print("chi2: ",chi2)
print("critical_value: ",critical_value)
if(chi2>critical_value):
print("Reject Null hypothesis")
else:
print("Accept Null hypothesis")
|
# We will register here processors for the message types by name
VM_ENGINE_REGISTER = dict()
def register_vm_engine(engine_name, engine_class):
""" Verifies a message is valid before forwarding it,
handling it (should it be different?).
"""
VM_ENGINE_REGISTER[engine_name] = engine_class
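# A minimal usage sketch (EchoEngine is a hypothetical engine class):
#   register_vm_engine("echo", EchoEngine)
#   engine_cls = VM_ENGINE_REGISTER["echo"]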
|
import json
import re
# Tests indices of coincidence of substrings with lengths up to 10
# returns the length with the highest index of coincidence
def getKeyLen(cipherText):
maxIndex = 0
keyLen = 1
for m in range(1, 11):
frequencies=[0 for i in range(26)]
numChars = 0
index = 0
# Counts the frequencies of each letter in the first substring based on key size
for i in range(len(cipherText)):
# This condition makes sure the character being tested will be in the string
# with key length m
if (i % m == 0):
# counts each letter
frequencies[ord(cipherText[i]) - 65] += 1
numChars += 1
for num in frequencies:
# Computes expected index of coincidence
index += (num/numChars) ** 2
# if this index of coincidence is the greatest, remember the index
# and the m. The split length with the greatest index of coincidence will
# most likely correspond to the English language.
if index > maxIndex:
maxIndex = index
keyLen = m
return keyLen
# Given a ciphertext and the length of the key,
# Look at frequencies to find the key
def getKey(cipherText, keyLen):
numLetters = len(cipherText)
# Frequencies of letters in the English language
standardFreq = [.082, .015, .028, .043, .127, .022, .020, .061, .070, .002, .008, .040, .024, .067, .075, .019, .001, .060, .063, .091, .028, .010, .023, .001, .020, .001]
# Gets characters that are affected by one specific key character
# eg. Keylength is "2" and cipher text is "abcdefghijkl"
# this will get either "a c e g i k"
# or "b d f h j l"
split = [cipherText[i:i+keyLen] for i in range(0, len(cipherText), keyLen)]
frequencies=[0 for i in range(26)]
key = ""
for n in range(0, keyLen):
        # Count the number of occurrences of each letter
frequencies=[0 for i in range(26)]
for i in split:
if (n < len(i)):
frequencies[ord(i[n]) - 65] += 1
maxMG = 0
maxMGindex = 0
# Try shifting the characters by each amount: 0 to 25
for g in range(26):
mg = 0
for freq in range(len(frequencies)):
mg += standardFreq[freq] * frequencies[(freq + g) % 26]/numLetters
if mg > maxMG:
maxMG = mg
maxMGindex = g
key = key + chr(maxMGindex+65)
return key
# Decodes vigenere ciphertext given a key
def decodeVig(cipherText, key):
plainText = ""
keyLen = len(key)
split = [cipherText[i:i+keyLen] for i in range(0, len(cipherText), keyLen)]
for i in split:
for j in range(keyLen):
if (j < len(i)):
# converts to int, mods to wrap around the alphabet, then converts back to character
plainText = plainText + chr((ord(i[j]) - 65 - (ord(key[j])-65)) % 26 + 65)
return plainText
def stripNonAlphabet(text):
return ''.join([i for i in text if i.isalpha()])
def lambda_handler(event, context):
C = event["body"]["cipherText"]
C = stripNonAlphabet(C).upper()
keyLen = getKeyLen(C)
key = getKey(C, keyLen)
plainText = decodeVig(C, key)
resp = {
"statusCode": 200,
"headers": {
"Access-Control-Allow-Origin": "*"
},
"body": {
"key": key,
"plainText": plainText
}
}
return resp
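# A minimal local invocation sketch (the event shape mirrors what
# lambda_handler reads above; the ciphertext is illustrative and may be too
# short for the frequency analysis to recover the true key):
#   event = {"body": {"cipherText": "LXFOPV EF RNHR"}}
#   print(lambda_handler(event, None))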
|
#!/usr/bin/env python3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as a single string to stdout. The output
format is usable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another typo in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some actions take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import collections
import sys, os, re, subprocess
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
# Directory to search for configuration information on Unix.
# Constant used by test_platform to test linux_distribution().
_UNIXCONFDIR = '/etc'
### Platform specific APIs
_libc_search = re.compile(b'(__libc_init)'
b'|'
b'(GLIBC_([0-9.]+))'
b'|'
br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=16384):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
if b'libc' in binary or b'GLIBC' in binary:
m = _libc_search.search(binary,pos)
else:
m = None
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = [
s.decode('latin1') if s is not None else s
for s in m.groups()]
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
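# Illustrative result (hedged; depends on the interpreter binary and the system libc): on a
# typical glibc-based Linux, libc_ver() returns something like ('glibc', '2.17'), and the
# given defaults ('', '') come back when nothing recognisable is found in the binary.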
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
distname = 'SuSE'
for line in open('/var/adm/inst-log/info'):
tv = line.split()
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = value.strip()
elif tag == 'DIST_IDENT':
values = value.split('-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
for line in open('/etc/.installed'):
pkg = line.split('-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = firstline.strip().split()
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
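# Illustrative parses (hedged examples of the three branches above):
#   LSB format:     "Fedora release 19 (Schrodinger's Cat)" -> ('Fedora', '19', "Schrodinger's Cat")
#   pre-LSB format: "Debian GNU/Linux 7.1 (wheezy)"         -> ('Debian GNU/Linux ', '7.1', 'wheezy')
#   unknown format: "something unrecognised here"           -> ('', 'something', 'unrecognised')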
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
try:
etc = os.listdir(_UNIXCONFDIR)
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
with open(os.path.join(_UNIXCONFDIR, file), 'r',
encoding='utf-8', errors='surrogateescape') as f:
firstline = f.readline()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):
""" Portable popen() interface.
"""
import warnings
warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = version.split('.')
if build:
l.append(build)
try:
# map() is lazy in Python 3, so force the conversion here; otherwise a ValueError
# raised by a non-numeric component would escape this try/except.
strings = list(map(str, map(int, l)))
except ValueError:
strings = l
version = '.'.join(strings[:3])
return version
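# Illustrative behaviour (hedged examples, not from the original module):
#   _norm_version('5.00.2195')         -> '5.0.2195'  (extra zeros dropped)
#   _norm_version('6.0', build='6002') -> '6.0.6002'
#   _norm_version('XP')                -> 'XP'        (non-numeric parts are left as-is)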
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
to exist on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error('command failed')
# XXX How can I suppress shell errors from being written
# to stderr ?
except os.error as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = info.strip()
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
def _win32_getvalue(key,name,default=''):
""" Read a value for name from the registry key.
In case this fails, default is returned.
"""
try:
# Use win32api if available
from win32api import RegQueryValueEx
except ImportError:
# On Python 2.0 and later, emulate using winreg
import winreg
RegQueryValueEx = winreg.QueryValueEx
try:
return RegQueryValueEx(key,name)
except:
return default
def win32_ver(release='',version='',csd='',ptype=''):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return release,version,csd,ptype
else:
# Emulation using winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = winreg.QueryValueEx
RegOpenKeyEx = winreg.OpenKeyEx
RegCloseKey = winreg.CloseKey
HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_NT_WORKSTATION = 1
VER_NT_SERVER = 3
REG_SZ = 1
# Find out the registry key and some general version infos
winver = GetVersionEx()
maj,min,buildno,plat,csd = winver
version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
if hasattr(winver, "service_pack"):
if winver.service_pack != "":
csd = 'SP%s' % winver.service_pack_major
else:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
elif min == 1:
release = 'XP'
elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if hasattr(winver, "product_type"):
product_type = winver.product_type
else:
product_type = VER_NT_WORKSTATION
# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
# or help from the registry, we cannot properly identify
# non-workstation versions.
try:
key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
name, type = RegQueryValueEx(key, "ProductName")
# Discard any type that isn't REG_SZ
if type == REG_SZ and name.find("Server") != -1:
product_type = VER_NT_SERVER
except WindowsError:
# Use default of VER_NT_WORKSTATION
pass
if min == 0:
if product_type == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
elif min == 1:
if product_type == VER_NT_WORKSTATION:
release = '7'
else:
release = '2008ServerR2'
elif min == 2:
if product_type == VER_NT_WORKSTATION:
release = '8'
else:
release = '2012Server'
else:
release = 'post2012Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj,min)
return release,version,csd,ptype
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return release,version,csd,ptype
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
#if subversion:
# release = release + subversion # 95a, 95b, etc.
build = _win32_getvalue(keyCurVer,
'CurrentBuildNumber',
('',1))[0]
ptype = _win32_getvalue(keyCurVer,
'CurrentType',
(ptype,1))[0]
# Normalize version
version = _norm_version(version,build)
# Close key
RegCloseKey(keyCurVer)
return release,version,csd,ptype
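# Illustrative result (hedged, machine-dependent): on a Windows 7 SP1 machine this returns
# something like ('7', '6.1.7601', 'SP1', 'Multiprocessor Free').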
def _mac_ver_lookup(selectors,default=None):
from _gestalt import gestalt
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, OSError):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import _gestalt
except ImportError:
return None
# Get the infos
sysv, sysa = _mac_ver_lookup(('sysv','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
# the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
versioninfo=('', '', '')
return release,versioninfo,machine
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo=('', '', '')
machine = os.uname().machine
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return release,versioninfo,machine
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = release.split('.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = '.'.join(l)
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
# apps are also supported..
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ','_')
platform = platform.replace('/','-')
platform = platform.replace('\\','-')
platform = platform.replace(':','-')
platform = platform.replace(';','-')
platform = platform.replace('"','-')
platform = platform.replace('(','-')
platform = platform.replace(')','-')
# No need to report 'unknown' information...
platform = platform.replace('unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = f.read().strip()
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output. Follow the symlinks. It returns
default in case the command should fail.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0].decode('latin-1')
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
fileout = _syscmd_file(executable, '')
else:
fileout = ''
if not fileout and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b,l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits,linkage
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
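# Illustrative results (hedged, machine-dependent): a 64-bit Linux CPython typically yields
# ('64bit', 'ELF'); on Windows, where the "file" command is unavailable, the fallback uses
# sizeof(pointer) plus _default_architecture and yields e.g. ('64bit', 'WindowsPE').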
### Portable uname() interface
uname_result = collections.namedtuple("uname_result",
"system node release version machine processor")
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
# Hmm, so there is either no uname or uname has returned
# 'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = ', '.join(vminfo)
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
#If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = uname_result(system,node,release,version,machine,processor)
return _uname_cache
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname().system
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname().node
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname().release
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname().version
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname().machine
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname().processor
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[([^\]]+)\]?', re.ASCII)
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
'([\d\.]+)'
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)', re.ASCII)
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
r'([\d.]+)\s*'
'\(IronPython\s*'
'[\d.]+\s*'
'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
elif hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(_sys_version()[1].split('.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system,node,release,version,machine,processor = uname()
if machine == processor:
processor = ''
if aliased:
system,release,version = system_alias(system,release,version)
if system == 'Windows':
# MS platforms
rel,vers,csd,ptype = win32_ver(version)
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,version,csd)
elif system in ('Linux',):
# Linux based systems
distname,distversion,distid = dist('')
if distname and not terse:
platform = _platform(system,release,machine,processor,
'with',
distname,distversion,distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname,libcversion = libc_ver(sys.executable)
platform = _platform(system,release,machine,processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system,release,version)
else:
platform = _platform(system,release,version,
'on',
os_name,os_version,os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,machine)
else:
# Generic handler
if terse:
platform = _platform(system,release)
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
_platform_cache[(aliased, terse)] = platform
return platform
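# Illustrative outputs (hedged, machine-dependent):
#   'Linux-3.13.0-24-generic-x86_64-with-Ubuntu-14.04-trusty'
#   'Windows-7-6.1.7601-SP1'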
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print(platform(aliased,terse))
sys.exit(0)
|
from typing import List, Dict, Tuple, Sequence, Iterable
import pandas as pd
import numpy as np
class DataFrameCleaner(object):
def __init__(self):
pass
def clean(self, df: pd.DataFrame) -> pd.DataFrame:
result = (df.
pipe(self.copy_df).
pipe(self.drop_missing_columns).
pipe(self.to_category)
)
for column_name in result.select_dtypes(include='number').columns:
result = self.remove_outlier_rows(result, column_name)
return result
def copy_df(self, df):
return df.copy()
def drop_missing_columns(self, df, min_values_percent=40):
"""drops the columns with `min_values_percent` percent or more missing values"""
thresh = len(df) * ((100 - min_values_percent) / 100)
df.dropna(axis=1, thresh=thresh, inplace=True)
return df
def remove_outlier_rows(self, df, column_name, lower_quantile=0.05, upper_quantile=0.95):
"""Keep only rows whose value for the given numeric column lies between the lower and upper quantiles (5th-95th by default)."""
low = np.quantile(df[column_name].dropna(), lower_quantile)
high = np.quantile(df[column_name].dropna(), upper_quantile)
return df[df[column_name].between(low, high, inclusive=True)]
def to_category(self, df):
cols = df.select_dtypes(include='object').columns
for col in cols:
ratio = len(df[col].value_counts()) / len(df)
if ratio < 0.05:
df[col] = df[col].astype('category')
return df
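# Minimal usage sketch (hedged; the synthetic data and column names below are invented for
# illustration and are not part of the class above):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    raw = pd.DataFrame({
        'price': rng.lognormal(mean=3.0, sigma=1.0, size=1000),
        'city': rng.choice(['Addis Ababa', 'Nairobi', 'Kampala'], size=1000),
        'mostly_missing': [np.nan] * 950 + list(range(50)),
    })
    cleaned = DataFrameCleaner().clean(raw)
    # Expect 'mostly_missing' to be dropped (95% missing), 'city' converted to a category,
    # and roughly 10% of rows removed as 'price' outliers.
    print(cleaned.dtypes)
    print(len(raw), '->', len(cleaned), 'rows')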
|
from random import shuffle
from django.test import TestCase
from supplier.models import Supplier
from _helpers.tests import SUPPLIER, generate_phone, json_names_to_dic, numbers_generator
class SupplierTest(TestCase):
def setUp(self) -> None:
return super().setUp()
def test_create_supplier(self):
supplier = Supplier.objects.create(**SUPPLIER)
self.assertEqual(str(supplier), SUPPLIER['name'])
def test_create_bulk_suppliers(self):
limit = 100
names = json_names_to_dic(limit)
for name in names:
phone = generate_phone(name)
data = {'name':name, 'phone': phone}
SUPPLIER.update(data)
Supplier.objects.create(**SUPPLIER)
|
"""Private module that determines how data is encoded and serialized, to be able to send it over a wire, or save to disk"""
import base64
import io
import json
import numbers
import pickle
import uuid
import struct
import collections.abc
import numpy as np
import pyarrow as pa
import vaex
from .datatype import DataType
registry = {}
def register(name):
def wrapper(cls):
assert name not in registry, f'{name} already in registry: {registry[name]}'
registry[name] = cls
return cls
return wrapper
def make_class_registery(groupname):
_encoding_types = {}
def register_helper(cls):
name = cls.snake_name #name or getattr(cls, 'snake_name') or cls.__name__
_encoding_types[name] = cls
return cls
@register(groupname)
class encoding:
@staticmethod
def encode(encoding, obj):
spec = obj.encode(encoding)
spec[f'{groupname}-type'] = obj.snake_name
return spec
@staticmethod
def decode(encoding, spec, **kwargs):
spec = spec.copy()
type = spec.pop(f'{groupname}-type')
cls = _encoding_types[type]
return cls.decode(encoding, spec, **kwargs)
return register_helper
@register("json") # this will pass though data as is
class vaex_json_encoding:
@classmethod
def encode(cls, encoding, result):
return result
@classmethod
def decode(cls, encoding, result_encoded):
return result_encoded
@register("vaex-task-result")
class vaex_task_result_encoding:
@classmethod
def encode(cls, encoding, result):
return encoding.encode('vaex-evaluate-result', result)
@classmethod
def decode(cls, encoding, result_encoded):
return encoding.decode('vaex-evaluate-result', result_encoded)
@register("vaex-rmi-result")
class vaex_rmi_result_encoding:
@classmethod
def encode(cls, encoding, result):
return encoding.encode('json', result)
@classmethod
def decode(cls, encoding, result_encoded):
return encoding.decode('json', result_encoded)
@register("vaex-evaluate-result")
class vaex_evaluate_results_encoding:
@classmethod
def encode(cls, encoding, result):
if isinstance(result, (list, tuple)):
return [cls.encode(encoding, k) for k in result]
else:
return encoding.encode('array', result)
@classmethod
def decode(cls, encoding, result_encoded):
if isinstance(result_encoded, (list, tuple)):
return [cls.decode(encoding, k) for k in result_encoded]
else:
return encoding.decode('array', result_encoded)
@register("array")
class array_encoding:
@classmethod
def encode(cls, encoding, result):
if isinstance(result, np.ndarray):
return {'type': 'ndarray', 'data': encoding.encode('ndarray', result)}
elif isinstance(result, vaex.array_types.supported_arrow_array_types):
return {'type': 'arrow-array', 'data': encoding.encode('arrow-array', result)}
if isinstance(result, vaex.column.Column):
return {'type': 'column', 'data': encoding.encode('column', result)}
elif isinstance(result, numbers.Number):
try:
result = result.item() # for numpy scalars
except: # noqa
pass
return {'type': 'json', 'data': result}
else:
raise ValueError('Cannot encode: %r' % result)
@classmethod
def decode(cls, encoding, result_encoded):
return encoding.decode(result_encoded['type'], result_encoded['data'])
@register("arrow-array")
class arrow_array_encoding:
@classmethod
def encode(cls, encoding, array):
schema = pa.schema({'x': array.type})
with pa.BufferOutputStream() as sink:
with pa.ipc.new_stream(sink, schema) as writer:
writer.write_table(pa.table({'x': array}))
blob = sink.getvalue()
return {'arrow-ipc-blob': encoding.add_blob(blob)}
@classmethod
def decode(cls, encoding, result_encoded):
if 'arrow-serialized-blob' in result_encoded: # backward compatibility
blob = encoding.get_blob(result_encoded['arrow-serialized-blob'])
return pa.deserialize(blob)
else:
blob = encoding.get_blob(result_encoded['arrow-ipc-blob'])
with pa.BufferReader(blob) as source:
with pa.ipc.open_stream(source) as reader:
table = reader.read_all()
assert table.num_columns == 1
ar = table.column(0)
if len(ar.chunks) == 1:
ar = ar.chunks[0]
return ar
@register("ndarray")
class ndarray_encoding:
@classmethod
def encode(cls, encoding, array):
# if array.dtype.kind == 'O':
# raise ValueError('Numpy arrays with objects cannot be serialized: %r' % array)
mask = None
dtype = array.dtype
if np.ma.isMaskedArray(array):
values = array.data
mask = array.mask
else:
values = array
if values.dtype.kind in 'mM':
values = values.view(np.uint64)
if values.dtype.kind == 'O':
data = {
'values': values.tolist(), # rely on json encoding
'shape': array.shape,
'dtype': encoding.encode('dtype', DataType(dtype))
}
else:
data = {
'values': encoding.add_blob(values),
'shape': array.shape,
'dtype': encoding.encode('dtype', DataType(dtype))
}
if mask is not None:
data['mask'] = encoding.add_blob(mask)
return data
@classmethod
def decode(cls, encoding, result_encoded):
if isinstance(result_encoded, (list, tuple)):
return [cls.decode(encoding, k) for k in result_encoded]
else:
dtype = encoding.decode('dtype', result_encoded['dtype'])
shape = result_encoded['shape']
if dtype.kind == 'O':
data = result_encoded['values']
array = np.array(data, dtype=dtype.numpy)
else:
data = encoding.get_blob(result_encoded['values'])
array = np.frombuffer(data, dtype=dtype.numpy).reshape(shape)
if 'mask' in result_encoded:
mask_data = encoding.get_blob(result_encoded['mask'])
mask_array = np.frombuffer(mask_data, dtype=np.bool_).reshape(shape)
array = np.ma.array(array, mask=mask_array)
return array
@register("numpy-scalar")
class numpy_scalar_encoding:
@classmethod
def encode(cls, encoding, scalar):
if scalar.dtype.kind in 'mM':
value = int(scalar.astype(int))
else:
value = scalar.item()
return {'value': value, 'dtype': encoding.encode('dtype', DataType(scalar.dtype))}
@classmethod
def decode(cls, encoding, scalar_spec):
dtype = encoding.decode('dtype', scalar_spec['dtype'])
value = scalar_spec['value']
return np.array([value], dtype=dtype.numpy)[0]
@register("dtype")
class dtype_encoding:
@staticmethod
def encode(encoding, dtype):
dtype = DataType(dtype)
if dtype.is_list:
return {'type': 'list', 'value_type': encoding.encode('dtype', dtype.value_type)}
dtype = DataType(dtype).internal
return str(dtype)
@staticmethod
def decode(encoding, type_spec):
if isinstance(type_spec, dict):
if type_spec['type'] == 'list':
sub = encoding.decode('dtype', type_spec['value_type']).arrow
return DataType(pa.list_(sub))
else:
raise ValueError(f'Do not understand type {type_spec}')
if type_spec == 'string':
return DataType(pa.string())
if type_spec == 'large_string':
return DataType(pa.large_string())
# TODO: find a proper way to support all arrow types
if type_spec == 'timestamp[ms]':
return DataType(pa.timestamp('ms'))
else:
return DataType(np.dtype(type_spec))
@register("dataframe-state")
class dataframe_state_encoding:
@staticmethod
def encode(encoding, state):
return state
@staticmethod
def decode(encoding, state_spec):
return state_spec
@register("selection")
class selection_encoding:
@staticmethod
def encode(encoding, selection):
return selection.to_dict() if selection is not None else None
@staticmethod
def decode(encoding, selection_spec):
if selection_spec is None:
return None
selection = vaex.selections.selection_from_dict(selection_spec)
return selection
@register("function")
class function_encoding:
@staticmethod
def encode(encoding, function):
return vaex.serialize.to_dict(function.f)
@staticmethod
def decode(encoding, function_spec, trusted=False):
if function_spec is None:
return None
function = vaex.serialize.from_dict(function_spec, trusted=trusted)
return function
@register("variable")
class variable_encoding:
@staticmethod
def encode(encoding, obj):
if isinstance(obj, np.ndarray):
return {'type': 'ndarray', 'data': encoding.encode('ndarray', obj)}
elif isinstance(obj, vaex.array_types.supported_arrow_array_types):
return {'type': 'arrow-array', 'data': encoding.encode('arrow-array', obj)}
elif isinstance(obj, vaex.hash.ordered_set):
return {'type': 'ordered-set', 'data': encoding.encode('ordered-set', obj)}
elif isinstance(obj, np.generic):
return {'type': 'numpy-scalar', 'data': encoding.encode('numpy-scalar', obj)}
elif isinstance(obj, np.integer):
return obj.item()
elif isinstance(obj, np.floating):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.bytes_):
return obj.decode('UTF-8')
elif isinstance(obj, bytes):
return str(obj, encoding='utf-8')
else:
return obj
@staticmethod
def decode(encoding, obj_spec):
if isinstance(obj_spec, dict):
return encoding.decode(obj_spec['type'], obj_spec['data'])
else:
return obj_spec
@register("ordered-set")
class ordered_set_encoding:
@staticmethod
def encode(encoding, obj):
values = list(obj.extract().items())
clsname = obj.__class__.__name__
return {
'class': clsname,
'data': {
'values': values,
'count': obj.count,
'nan_count': obj.nan_count,
'missing_count': obj.null_count
}
}
@staticmethod
def decode(encoding, obj_spec):
clsname = obj_spec['class']
cls = getattr(vaex.hash, clsname)
value = cls(dict(obj_spec['data']['values']), obj_spec['data']['count'], obj_spec['data']['nan_count'], obj_spec['data']['missing_count'])
return value
class Encoding:
def __init__(self, next=None):
self.registry = {**registry}
self.blobs = {}
# for sharing objects
self._object_specs = {}
self._objects = {}
def set_object(self, id, obj):
assert id not in self._objects
self._objects[id] = obj
def get_object(self, id):
return self._objects[id]
def has_object(self, id):
return id in self._objects
def set_object_spec(self, id, obj):
assert id not in self._object_specs, f"Overwriting id {id}"
self._object_specs[id] = obj
def get_object_spec(self, id):
return self._object_specs[id]
def has_object_spec(self, id):
return id in self._object_specs
def encode(self, typename, value):
encoded = self.registry[typename].encode(self, value)
return encoded
def encode_collection(self, typename, values):
if isinstance(values, (list, tuple)):
return self.encode_list(typename, values)
elif isinstance(values, dict):
return self.encode_dict(typename, values)
else:
return self.encode(typename, values)
def encode_list(self, typename, values):
encoded = [self.registry[typename].encode(self, k) for k in values]
return encoded
def encode_list2(self, typename, values):
encoded = [self.encode_list(typename, k) for k in values]
return encoded
def encode_dict(self, typename, values):
encoded = {key: self.registry[typename].encode(self, value) for key, value in values.items()}
return encoded
def decode(self, typename, value, **kwargs):
decoded = self.registry[typename].decode(self, value, **kwargs)
return decoded
def decode_collection(self, typename, values, **kwargs):
if isinstance(values, (list, tuple)):
return self.decode_list(typename, values, **kwargs)
elif isinstance(values, dict):
return self.decode_dict(typename, values, **kwargs)
else:
return self.decode(typename, values)
def decode_list(self, typename, values, **kwargs):
decoded = [self.registry[typename].decode(self, k, **kwargs) for k in values]
return decoded
def decode_list2(self, typename, values, **kwargs):
decoded = [self.decode_list(typename, k, **kwargs) for k in values]
return decoded
def decode_dict(self, typename, values, **kwargs):
decoded = {key: self.registry[typename].decode(self, value, **kwargs) for key, value in values.items()}
return decoded
def add_blob(self, buffer):
bytes = memoryview(buffer).tobytes()
blob_id = vaex.cache.fingerprint(bytes)
self.blobs[blob_id] = bytes
return f'blob:{blob_id}'
def get_blob(self, blob_ref):
assert blob_ref.startswith('blob:')
blob_id = blob_ref[5:]
return self.blobs[blob_id]
class inline:
@staticmethod
def serialize(data, encoding):
import base64
blobs = {key: base64.b64encode(value).decode('ascii') for key, value in encoding.blobs.items()}
return json.dumps({'data': data, 'blobs': blobs})
@staticmethod
def deserialize(data, encoding):
data = json.loads(data)
encoding.blobs = {key: base64.b64decode(value.encode('ascii')) for key, value in data['blobs'].items()}
return data['data']
def _pack_blobs(*blobs):
count = len(blobs)
lengths = [len(blob) for blob in blobs]
stream = io.BytesIO()
# header: <number of blobs>, <offset 0>, ..., <offset N> as 8-byte signed ints;
# the last offset equals the total length of the packed buffer
header_length = 8 * (2 + count)
offsets = (np.cumsum([0] + lengths) + header_length).tolist()
stream.write(struct.pack(f'{count+2}q', count, *offsets))
for blob in blobs:
stream.write(blob)
bytes = stream.getvalue()
assert offsets[-1] == len(bytes)
return bytes
def _unpack_blobs(bytes):
stream = io.BytesIO(bytes)
count, = struct.unpack('q', stream.read(8))
offsets = struct.unpack(f'{count+1}q', stream.read(8 * (count + 1)))
assert offsets[-1] == len(bytes)
blobs = []
for i1, i2 in zip(offsets[:-1], offsets[1:]):
blobs.append(bytes[i1:i2])
return blobs
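# Hedged worked example of the framing above: _pack_blobs(b'a', b'bb', b'ccc') writes a
# 40-byte header (8 * (2 + 3)) containing (3, 40, 41, 43, 46), followed by the blob bytes,
# and _unpack_blobs() of that buffer returns [b'a', b'bb', b'ccc'] again.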
class binary:
@staticmethod
def serialize(data, encoding):
blob_refs = list(encoding.blobs.keys())
blobs = [encoding.blobs[k] for k in blob_refs]
json_blob = json.dumps({'data': data, 'blob_refs': blob_refs, 'objects': encoding._object_specs})
return _pack_blobs(json_blob.encode('utf8'), *blobs)
@staticmethod
def deserialize(data, encoding):
json_data, *blobs = _unpack_blobs(data)
json_data = json_data.decode('utf8')
json_data = json.loads(json_data)
data = json_data['data']
encoding.blobs = {key: blob for key, blob in zip(json_data['blob_refs'], blobs)}
encoding._object_specs = json_data['objects']
return data
def fingerprint(typename, object):
'''Use the encoding framework to calculate a fingerprint'''
encoding = vaex.encoding.Encoding()
jsonable = encoding.encode(typename, object)
blob_keys = list(encoding.blobs) # blob keys are hashes, so they are unique and enough for a fingerprint
return vaex.cache.fingerprint(jsonable, blob_keys)
serialize = binary.serialize
deserialize = binary.deserialize
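# Minimal round-trip sketch (hedged; it only exercises the pass-through 'json' encoder and
# assumes a working vaex install, since this module imports vaex at the top):
#     enc = Encoding()
#     wire = serialize(enc.encode('json', {'answer': 42}), enc)
#     dec = Encoding()
#     assert dec.decode('json', deserialize(wire, dec)) == {'answer': 42}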
|
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for Snowflake EDW service resource hosted on AWS."""
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import flags
from perfkitbenchmarker import providers
# https://docs.snowflake.net/manuals/user-guide/snowsql-config.html#snowsql-config-file
DEFAULT_CONFIG_LOCATION = '~/.snowsql/'
DEFAULT_CONFIG_FILE = 'config'
FLAGS = flags.FLAGS
class Snowflake(edw_service.EdwService):
"""Object representing a Snowflake Data Warehouse Instance hosted on AWS."""
CLOUD = providers.AWS
SERVICE_TYPE = 'snowflake_aws'
def __init__(self, edw_service_spec):
super(Snowflake, self).__init__(edw_service_spec)
# As per Snowflake architecture,
# https://docs.snowflake.net/manuals/user-guide/intro-key-concepts.html#snowflake-architecture # pylint: disable=line-too-long
# A snowflake account can host multiple "virtual warehouses", however
# the current benchmarking is limited to a single "virtual warehouse" as
# identified by the connection (FLAGS.snowflake_connection) expected to be
# defined in config file (FLAGS.snowflake_snowsql_config_override_file)
self.snowsql_config_file = (
FLAGS.snowflake_snowsql_config_override_file if
FLAGS.snowflake_snowsql_config_override_file else DEFAULT_CONFIG_FILE)
self.named_connection = FLAGS.snowflake_connection
def IsUserManaged(self, edw_service_spec):
# TODO(saksena): Remove the assertion after implementing provisioning of
# virtual warehouses.
return True
def _Create(self):
"""Create a Snowflake cluster."""
raise NotImplementedError
def _Exists(self):
"""Method to validate the existence of a Snowflake cluster.
Returns:
Boolean value indicating the existence of a cluster.
"""
return True
def InstallAndAuthenticateRunner(self, vm, benchmark_name):
"""Method to perform installation and authentication of snowsql client.
SnowSQL is a cli client to submit queries to a Snowflake Warehouse instance.
https://docs.snowflake.net/manuals/user-guide/snowsql.html
Args:
vm: Client vm on which the script will be run.
benchmark_name: String name of the benchmark, to allow extraction and
usage of benchmark specific artifacts (certificates, etc.) during client
vm preparation.
"""
vm.Install('snowsql')
if FLAGS.snowflake_snowsql_config_override_file:
vm.PushFile(self.snowsql_config_file,
DEFAULT_CONFIG_LOCATION + DEFAULT_CONFIG_FILE)
else:
vm.InstallPreprovisionedBenchmarkData(benchmark_name,
[self.snowsql_config_file],
DEFAULT_CONFIG_LOCATION)
def RunCommandHelper(self):
"""Snowflake specific run script command components."""
return '--connection {}'.format(FLAGS.snowflake_connection)
def _Delete(self):
"""Delete a Snowflake cluster."""
raise NotImplementedError
def GetMetadata(self):
"""Return a metadata dictionary of the benchmarked Snowflake cluster."""
basic_data = super(Snowflake, self).GetMetadata()
basic_data['snowflake_connection'] = self.named_connection
return basic_data
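# Illustrative snowsql config sketch (assumed layout, see the Snowflake docs linked above):
# the named connection referenced by --snowflake_connection is expected to be defined in
# the config file pushed to ~/.snowsql/config, e.g.
#   [connections.perfkit]
#   accountname = <account>
#   username = <user>
#   password = <password>
#   warehousename = <virtual warehouse>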
|
#!/usr/bin/env python
import re
import sys
import time
from urllib.parse import urlparse, parse_qs
import requests
from bs4 import BeautifulSoup
PAGE_URL = "http://www.draftscout.com/players.php?GenPos=%s&DraftYear=%d&sortby=PlayerId&order=ASC&startspot=%d"
PAGE_SIZE = 15
UA = "Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"
def request_with_retry(url, retries=5):
headers = {
'User-Agent': UA
}
try:
sys.stderr.write("Getting %s\n" % url)
r = requests.get(url, headers=headers)
r.raise_for_status()
soup = BeautifulSoup(r.content, "html5lib")
rows = soup.select('table.sortable tr')
assert len(rows) > 0, "No rows found"
return rows
except Exception as e:
if retries > 0:
sys.stderr.write("Getting %s\n" % url)
time.sleep(120)
request_with_retry(url, retries=retries - 1)
else:
raise e
def get_position(pos, year, page=1):
url = PAGE_URL % (pos, year, (page - 1) * PAGE_SIZE)
rows = request_with_retry(url)
header = rows[0]
player_rows = rows[1:]
num_players = len(player_rows)
sys.stderr.write("Found %d players\n" % num_players)
time.sleep(5)
players = []
if num_players == 0:
return []
else:
fields = [title.text.strip() for title in header.find_all('td')]
for row in player_rows:
player = dict(zip(fields, [x.text.strip() for x in row.find_all('td')]))
player_link = row.find('a', href=re.compile('dsprofile')).get('href')
parsed_url = urlparse(player_link)
parsed_qs = parse_qs(parsed_url.query)
player['DS PlayerId'] = parsed_qs['PlayerId'][0]
player['Position'] = pos
player['DraftYear'] = year
if 'Height' in player and player['Height']:
feet, inches = player['Height'].split('-', 1)
player['Height inches'] = 12*int(feet) + int(inches)
if 'Hometown, State' in player:
hometown, state = player['Hometown, State'].split(',', 1)
player['Hometown'] = hometown.strip()
player['Home State'] = state.replace(",", ", ").strip()
del(player['Hometown, State'])
player['Player Name'] = re.sub(r'^\*', '', player['Player Name'])
players.append(player)
#sys.stderr.write(player['Player Name'] + "\n")
if num_players < PAGE_SIZE:
return players
else:
return players + get_position(pos, year, page + 1)
def main():
import csv
writer = None
positions = (
'QB',
'RB',
'FB',
'TE',
'WR',
'C',
'OT',
'OG',
'K',
'DE',
'DT',
'ILB',
'OLB',
'CB',
'FS',
'SS',
'P',
)
for pos in positions:
players = {}
for i in range(1):
new_players = {p['DS PlayerId']: p for p in get_position(pos, 2019)}
sys.stderr.write("Players: %d\n" % len(new_players))
if len(new_players) > 0:
players.update(new_players)
time.sleep(60)
if writer is None:
sys.stderr.write("Opening writer\n")
writer = csv.DictWriter(sys.stdout, fieldnames=list(players.values())[0].keys())
writer.writeheader()
sys.stderr.write("Writing %d players\n" % len(players))
for player in players.values():
writer.writerow(player)
if __name__ == '__main__':
main()
|
def getMax(arr):
    """Return the maximum of a bitonic array (increasing, then decreasing)."""
    start = 0
    end = len(arr) - 1
    while start < end:
        mid = (start + end) // 2
        if arr[mid] < arr[mid + 1]:
            # still on the ascending slope, the peak is to the right of mid
            start = mid + 1
        elif arr[mid] > arr[mid + 1]:
            # on the descending slope, the peak is at mid or to its left
            end = mid
        else:
            # arr[mid] == arr[mid + 1]: a plateau hides the peak's direction,
            # so fall back to a linear scan of the remaining range
            return max(arr[start:end + 1])
    return arr[start]

arr = [1, 2, 3, 3, 4, 4, 4, 4, 4, 1]
print(getMax(arr))
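# A strictly bitonic example (added for illustration, no plateau): here the
# binary search converges without the linear fallback.
print(getMax([1, 3, 8, 12, 4, 2]))  # 12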
|
from setuptools import setup, find_packages
setup(
name='sarafan',
version='0.1.0',
url='https://github.com/sarafanio/sarafan.git',
author='Sarafan Community',
author_email='flu2020@pm.me',
description='Sarafan node and client application. Sarafan is a distributed '
'publication delivery network for anonymous publishing.',
packages=find_packages(),
install_requires=[
'core-service >= 0.2.0',
'stem >= 1.8.0',
'aiohttp >= 3.6.2',
'aiohttp-cors >= 0.7.0',
'aiohttp-socks >= 0.5.3',
'eth_abi >= 2.1.0',
'pycryptodomex >= 3.9.7',
'eth_account >= 0.4.0',
'yoyo_migrations >= 7.0.1',
'async-timeout >= 3.0.1',
'ConfigArgParse >= 1.2',
'colorama >= 0.4.3',
'dataclasses-json >= 0.5.2',
],
entry_points={
'console_scripts': [
'sarafan = sarafan.cli:cli',
],
},
extras_require={
'dev': [
'pytest',
'pytest-asyncio',
'pytest-cov',
'pylama',
'mypy',
'factory_boy',
]
}
)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class UPackLimitedPackageMetadata(Model):
"""
:param version:
:type version: str
"""
_attribute_map = {
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, version=None):
super(UPackLimitedPackageMetadata, self).__init__()
self.version = version
class UPackLimitedPackageMetadataListResponse(Model):
"""
:param count:
:type count: int
:param value:
:type value: list of :class:`UPackLimitedPackageMetadata <azure.devops.v6_0.upack.models.UPackLimitedPackageMetadata>`
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'value': {'key': 'value', 'type': '[UPackLimitedPackageMetadata]'}
}
def __init__(self, count=None, value=None):
super(UPackLimitedPackageMetadataListResponse, self).__init__()
self.count = count
self.value = value
class UPackPackageMetadata(Model):
"""
:param description:
:type description: str
:param manifest_id:
:type manifest_id: str
:param super_root_id:
:type super_root_id: str
:param version:
:type version: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'manifest_id': {'key': 'manifestId', 'type': 'str'},
'super_root_id': {'key': 'superRootId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, description=None, manifest_id=None, super_root_id=None, version=None):
super(UPackPackageMetadata, self).__init__()
self.description = description
self.manifest_id = manifest_id
self.super_root_id = super_root_id
self.version = version
class UPackPackagePushMetadata(UPackPackageMetadata):
"""
:param description:
:type description: str
:param manifest_id:
:type manifest_id: str
:param super_root_id:
:type super_root_id: str
:param version:
:type version: str
:param proof_nodes:
:type proof_nodes: list of str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'manifest_id': {'key': 'manifestId', 'type': 'str'},
'super_root_id': {'key': 'superRootId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'proof_nodes': {'key': 'proofNodes', 'type': '[str]'}
}
def __init__(self, description=None, manifest_id=None, super_root_id=None, version=None, proof_nodes=None):
super(UPackPackagePushMetadata, self).__init__(description=description, manifest_id=manifest_id, super_root_id=super_root_id, version=version)
self.proof_nodes = proof_nodes
class UPackPackageVersionDeletionState(Model):
"""
Deletion state of a Universal package.
:param deleted_date: UTC date the package was deleted.
:type deleted_date: datetime
:param name: Name of the package.
:type name: str
:param version: Version of the package.
:type version: str
"""
_attribute_map = {
'deleted_date': {'key': 'deletedDate', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, deleted_date=None, name=None, version=None):
super(UPackPackageVersionDeletionState, self).__init__()
self.deleted_date = deleted_date
self.name = name
self.version = version
__all__ = [
'UPackLimitedPackageMetadata',
'UPackLimitedPackageMetadataListResponse',
'UPackPackageMetadata',
'UPackPackagePushMetadata',
'UPackPackageVersionDeletionState',
]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from mol_tree import Vocab, MolTree
from nnutils import create_var, avg_pool, index_select_ND, GRU
from jtnn_enc import JTNNEncoder
class ScaffoldGAN(nn.Module):
def __init__(self, jtnn, hidden_size, beta, gumbel=False):
super(ScaffoldGAN, self).__init__()
self.hidden_size = hidden_size
self.beta = beta
self.gumbel = gumbel
self.netG = Generator(jtnn.decoder)
self.netD = nn.Sequential(
nn.Linear(jtnn.hidden_size * 2, hidden_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_size, hidden_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_size, 1)
)
def reset_netG(self, jtnn):
self.netG = Generator(jtnn.decoder)
def encode_real(self, y_batch, jtnn):
#Generate real y_root features
y_batch, y_jtenc_holder, _ = y_batch
y_root_vecs = self.netG(y_jtenc_holder, depth=15)
return y_root_vecs
def encode_fake(self, z_batch, jtnn):
#Generate fake cond features
z_batch, z_jtenc_holder, z_mpn_holder = z_batch
z_tree_vecs, _, z_mol_vecs = jtnn.encode(z_jtenc_holder, z_mpn_holder)
z_tree_vecs_noised, z_mol_vecs_noised = jtnn.fuse_noise(z_tree_vecs, z_mol_vecs)
#Generate fake root features
pred_root_vecs = []
for i in xrange(len(z_batch)):
root_vec,_ = jtnn.decoder.soft_decode(
z_tree_vecs_noised[i].unsqueeze(0), z_mol_vecs_noised[i].unsqueeze(0),
gumbel=self.gumbel, slope=1.0, temp=1.0
)
pred_root_vecs.append(root_vec)
pred_root_vecs = torch.cat(pred_root_vecs, dim=0)
return pred_root_vecs
def train_D(self, x_batch, y_batch, jtnn):
real_vecs = self.encode_real(y_batch, jtnn).detach()
fake_vecs = self.encode_fake(x_batch, jtnn).detach()
real_score = self.netD(real_vecs)
fake_score = self.netD(fake_vecs)
score = fake_score.mean() - real_score.mean() #maximize -> minimize minus
score.backward()
#Gradient Penalty
inter_gp, inter_norm = self.gradient_penalty(real_vecs, fake_vecs)
inter_gp.backward()
return -score.item(), inter_norm
def train_G(self, x_batch, y_batch, jtnn):
real_vecs = self.encode_real(y_batch, jtnn)
fake_vecs = self.encode_fake(x_batch, jtnn)
real_score = self.netD(real_vecs)
fake_score = self.netD(fake_vecs)
score = real_score.mean() - fake_score.mean()
score.backward()
return score.item()
def gradient_penalty(self, real_vecs, fake_vecs):
eps = create_var(torch.rand(real_vecs.size(0), 1))
inter_data = eps * real_vecs + (1 - eps) * fake_vecs
inter_data = autograd.Variable(inter_data, requires_grad=True)
inter_score = self.netD(inter_data).squeeze(-1)
inter_grad = autograd.grad(inter_score, inter_data,
grad_outputs=torch.ones(inter_score.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
inter_norm = inter_grad.norm(2, dim=1)
inter_gp = ((inter_norm - 1) ** 2).mean() * self.beta
#inter_norm = (inter_grad ** 2).sum(dim=1)
#inter_gp = torch.max(inter_norm - 1, self.zero).mean() * self.beta
return inter_gp, inter_norm.mean().item()
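# Note (added for clarity): this implements a WGAN-GP style gradient penalty,
# beta * E[(||grad_x D(x_hat)||_2 - 1)^2] evaluated at random interpolations x_hat
# of real and fake feature vectors; train_D applies it on top of the critic loss above.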
class Generator(nn.Module):
def __init__(self, jtnn_decoder):
super(Generator, self).__init__()
self.hidden_size = jtnn_decoder.hidden_size
self.embedding = jtnn_decoder.embedding
self.W_z = jtnn_decoder.W_z
self.W_r = jtnn_decoder.W_r
self.U_r = jtnn_decoder.U_r
self.W_h = jtnn_decoder.W_h
def forward(self, holder, depth):
fnode = create_var(holder[0])
fmess = create_var(holder[1])
node_graph = create_var(holder[2])
mess_graph = create_var(holder[3])
scope = holder[4]
fnode = self.embedding(fnode)
x = index_select_ND(fnode, 0, fmess)
h = create_var(torch.zeros(mess_graph.size(0), self.hidden_size))
mask = torch.ones(h.size(0), 1)
mask[0] = 0 #first vector is padding
mask = create_var(mask)
for it in xrange(depth):
h_nei = index_select_ND(h, 0, mess_graph)
h = GRU(x, h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
h = h * mask
mess_nei = index_select_ND(h, 0, node_graph)
node_vecs = torch.cat([fnode, mess_nei.sum(dim=1)], dim=-1)
root_vecs = [node_vecs[st] for st,le in scope]
return torch.stack(root_vecs, dim=0)
def dfs(node, fa_idx):
max_depth = 0
for child in node.neighbors:
if child.idx == fa_idx: continue
max_depth = max(max_depth, dfs(child, node.idx))
return max_depth + 1
|
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from python_msx_sdk.api_client import ApiClient, Endpoint as _Endpoint
from python_msx_sdk.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from python_msx_sdk.model.error import Error
from python_msx_sdk.model.vulnerabilities_page import VulnerabilitiesPage
from python_msx_sdk.model.vulnerability_feed import VulnerabilityFeed
from python_msx_sdk.model.vulnerability_ingest_page import VulnerabilityIngestPage
from python_msx_sdk.model.vulnerability_ingestion import VulnerabilityIngestion
from python_msx_sdk.model.vulnerability_severity import VulnerabilitySeverity
class VulnerabilitiesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __get_ingest_vulnerabilities_tasks_page(
self,
page,
page_size,
**kwargs
):
"""Returns a filtered page of ingest tasks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ingest_vulnerabilities_tasks_page(page, page_size, async_req=True)
>>> result = thread.get()
Args:
page (int):
page_size (int):
Keyword Args:
start_date (datetime): Start date for date range filter on validation execution date.. [optional]
end_date (datetime): End date for date range filter on validation execution date.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VulnerabilityIngestPage
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['page'] = \
page
kwargs['page_size'] = \
page_size
return self.call_with_http_info(**kwargs)
self.get_ingest_vulnerabilities_tasks_page = _Endpoint(
settings={
'response_type': (VulnerabilityIngestPage,),
'auth': [],
'endpoint_path': '/vulnerability/api/v8/vulnerabilities/ingests',
'operation_id': 'get_ingest_vulnerabilities_tasks_page',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'page',
'page_size',
'start_date',
'end_date',
],
'required': [
'page',
'page_size',
],
'nullable': [
],
'enum': [
],
'validation': [
'page',
'page_size',
]
},
root_map={
'validations': {
('page',): {
'inclusive_minimum': 0,
},
('page_size',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'page':
(int,),
'page_size':
(int,),
'start_date':
(datetime,),
'end_date':
(datetime,),
},
'attribute_map': {
'page': 'page',
'page_size': 'pageSize',
'start_date': 'startDate',
'end_date': 'endDate',
},
'location_map': {
'page': 'query',
'page_size': 'query',
'start_date': 'query',
'end_date': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ingest_vulnerabilities_tasks_page
)
def __get_vulnerabilities_page(
self,
page,
page_size,
**kwargs
):
"""Returns a filtered page of vulnerabilities. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerabilities_page(page, page_size, async_req=True)
>>> result = thread.get()
Args:
page (int):
page_size (int):
Keyword Args:
cve_id (str): CVE identifier (https://www.cvedetails.com/cve-help.php) to filter by.. [optional]
vendor (str): Vendor identifier (as defined in NIST's CPE dictionary) to filter by.. [optional]
product (str): Product identifier (as defined in NIST's CPE dictionary) to filter by.. [optional]
version (str): Product version (as defined in NIST's CPE dictionary) to filter by.. [optional]
severity (VulnerabilitySeverity): A CVSS severity level (https://nvd.nist.gov/vuln-metrics/cvss) to filter by.. [optional]
start_date (datetime): Start date for date range filter on CVE published date.. [optional]
end_date (datetime): End date for date range filter on CVE published date.. [optional]
year (int): Year CVE published filter.. [optional]
sort_by (str): [optional] if omitted the server will use the default value of "publishedOn"
sort_order (str): [optional] if omitted the server will use the default value of "asc"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VulnerabilitiesPage
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['page'] = \
page
kwargs['page_size'] = \
page_size
return self.call_with_http_info(**kwargs)
self.get_vulnerabilities_page = _Endpoint(
settings={
'response_type': (VulnerabilitiesPage,),
'auth': [],
'endpoint_path': '/vulnerability/api/v8/vulnerabilities',
'operation_id': 'get_vulnerabilities_page',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'page',
'page_size',
'cve_id',
'vendor',
'product',
'version',
'severity',
'start_date',
'end_date',
'year',
'sort_by',
'sort_order',
],
'required': [
'page',
'page_size',
],
'nullable': [
],
'enum': [
'sort_by',
'sort_order',
],
'validation': [
'page',
'page_size',
'year',
]
},
root_map={
'validations': {
('page',): {
'inclusive_minimum': 0,
},
('page_size',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
('year',): {
'inclusive_maximum': 9999,
'inclusive_minimum': 1970,
},
},
'allowed_values': {
('sort_by',): {
"PUBLISHEDON": "publishedOn",
"MODIFIEDON": "modifiedOn"
},
('sort_order',): {
"ASC": "asc",
"DESC": "desc"
},
},
'openapi_types': {
'page':
(int,),
'page_size':
(int,),
'cve_id':
(str,),
'vendor':
(str,),
'product':
(str,),
'version':
(str,),
'severity':
(VulnerabilitySeverity,),
'start_date':
(datetime,),
'end_date':
(datetime,),
'year':
(int,),
'sort_by':
(str,),
'sort_order':
(str,),
},
'attribute_map': {
'page': 'page',
'page_size': 'pageSize',
'cve_id': 'cveId',
'vendor': 'vendor',
'product': 'product',
'version': 'version',
'severity': 'severity',
'start_date': 'startDate',
'end_date': 'endDate',
'year': 'year',
'sort_by': 'sortBy',
'sort_order': 'sortOrder',
},
'location_map': {
'page': 'query',
'page_size': 'query',
'cve_id': 'query',
'vendor': 'query',
'product': 'query',
'version': 'query',
'severity': 'query',
'start_date': 'query',
'end_date': 'query',
'year': 'query',
'sort_by': 'query',
'sort_order': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vulnerabilities_page
)
def __ingest_vulnerabilities(
self,
vulnerability_feed,
**kwargs
):
"""Ingests a CVE JSON feed into the Vulnerability Service datastore. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ingest_vulnerabilities(vulnerability_feed, async_req=True)
>>> result = thread.get()
Args:
vulnerability_feed (VulnerabilityFeed):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VulnerabilityIngestion
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vulnerability_feed'] = \
vulnerability_feed
return self.call_with_http_info(**kwargs)
self.ingest_vulnerabilities = _Endpoint(
settings={
'response_type': (VulnerabilityIngestion,),
'auth': [],
'endpoint_path': '/vulnerability/api/v8/vulnerabilities/ingests',
'operation_id': 'ingest_vulnerabilities',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vulnerability_feed',
],
'required': [
'vulnerability_feed',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vulnerability_feed':
(VulnerabilityFeed,),
},
'attribute_map': {
},
'location_map': {
'vulnerability_feed': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__ingest_vulnerabilities
)
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise the wallet. Ported from wallet.sh.
# Does the following:
# a) creates 3 nodes, with an empty chain (no blocks).
# b) node0 mines a block
# c) node1 mines 101 blocks, so now nodes 0 and 1 have 50btc, node2 has none.
# d) node0 sends 21 btc to node2, in two transactions (11 btc, then 10 btc).
# e) node0 mines a block, collects the fee on the second transaction
# f) node1 mines 100 blocks, to mature node0's just-mined block
# g) check that node0 has 100-21, node2 has 21
# h) node0 should now have 2 unspent outputs; send these to node2 via raw tx broadcast by node1
# i) have node1 mine a block
# j) check balances - node0 should have 0, node2 should have 100
# k) test ResendWalletTransactions - create transactions, startup fourth node, make sure it syncs
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 100)
assert_equal(self.nodes[2].getbalance("from1"), 100-21)
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
self.nodes[2].settxfee(Decimal('0.001'))
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), Decimal('89.99900000'))
assert_equal(self.nodes[0].getbalance(), Decimal('10.00000000'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), Decimal('79.99900000'))
assert_equal(self.nodes[0].getbalance(), Decimal('19.99900000'))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), Decimal('69.99800000'))
assert_equal(self.nodes[0].getbalance(), Decimal('29.99900000'))
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), Decimal('59.99800000'))
assert_equal(self.nodes[0].getbalance(), Decimal('39.99800000'))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), set([txid1, txid2]))
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0.00000000'));
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2);
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), Decimal('59.99800000')); #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), Decimal('61.99800000')); #balance should now include the broadcasted tx
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2);
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
#tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), Decimal('63.99800000')); #balance should include the rebroadcast tx
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2.00000000'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.00100000'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-3")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.00100000'))
#this should fail
errorString = ""
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException,e:
errorString = e.error['message']
assert_equal("Invalid amount" in errorString, True);
errorString = ""
try:
self.nodes[0].generate("2") #use a string to as block amount parameter must fail because it's not interpreted as amount
except JSONRPCException,e:
errorString = e.error['message']
assert_equal("not an integer" in errorString, True);
if __name__ == '__main__':
WalletTest ().main ()
|
import numpy as np
from pathlib import Path
from pvinspect.data import (
Image,
ModuleImage,
ImageSequence,
ModuleImageSequence,
EL_IMAGE,
)
from typing import List, Dict, Any, Optional
def assert_equal(value, target, precision=1e-3):
assert np.all(value > target - precision) and np.all(
value < target + precision
), "got value={}, target={}".format(value, target)
def random_image(lazy: bool = False, **kwargs) -> Image:
if lazy:
data = Image.LazyData(lambda: np.random.random((10, 10)))
else:
data = np.random.random((10, 10))
if "modality" not in kwargs.keys():
kwargs["modality"] = EL_IMAGE
if "path" not in kwargs.keys():
kwargs["path"] = Path() / "test.tif"
return Image(data, **kwargs)
def random_uint_image(lazy: bool = False, **kwargs) -> Image:
if lazy:
data = Image.LazyData(
lambda: (np.random.random((10, 10)) * 100).astype(np.uint32)
)
else:
data = (np.random.random((10, 10)) * 100).astype(np.uint32)
if "modality" not in kwargs.keys():
kwargs["modality"] = EL_IMAGE
if "path" not in kwargs.keys():
kwargs["path"] = Path() / "test.tif"
return Image(data, **kwargs)
def random_ubyte_image(lazy: bool = False, **kwargs) -> Image:
if lazy:
data = Image.LazyData(
lambda: (np.random.random((10, 10)) * 100).astype(np.uint8)
)
else:
data = (np.random.random((10, 10)) * 100).astype(np.uint8)
if "modality" not in kwargs.keys():
kwargs["modality"] = EL_IMAGE
if "path" not in kwargs.keys():
kwargs["path"] = Path() / "test.tif"
return Image(data, **kwargs)
def random_module_image() -> ModuleImage:
data = np.random.random((10, 10))
return ModuleImage(data, modality=EL_IMAGE, path=Path() / "test.tif")
def random_image_sequence(
N: int = 3, meta: Optional[List[Dict[str, Any]]] = None, **kwargs
) -> ImageSequence:
if meta is None:
imgs = [random_image(**kwargs) for i in range(N)]
else:
assert len(meta) == N
imgs = [random_image(meta=m, **kwargs) for m in meta]
return ImageSequence(imgs, False)
def random_uint_image_sequence(
N: int = 3, meta: Optional[List[Dict[str, Any]]] = None, **kwargs
) -> ImageSequence:
if meta is None:
imgs = [random_uint_image(**kwargs) for i in range(N)]
else:
assert len(meta) == N
imgs = [random_uint_image(meta=m, **kwargs) for m in meta]
return ImageSequence(imgs, False)
def random_ubyte_image_sequence(
N: int = 3, meta: Optional[List[Dict[str, Any]]] = None, **kwargs
) -> ImageSequence:
if meta is None:
imgs = [random_ubyte_image(**kwargs) for i in range(N)]
else:
assert len(meta) == N
imgs = [random_ubyte_image(meta=m, **kwargs) for m in meta]
return ImageSequence(imgs, False)
def random_module_image_sequence() -> ModuleImageSequence:
imgs = [random_module_image() for x in range(3)]
return ModuleImageSequence(imgs, False)
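# Usage sketch (added for illustration): these helpers generate random fixtures for tests, e.g.
#   img = random_uint_image(lazy=True)     # lazily evaluated EL image
#   seq = random_image_sequence(N=2)       # sequence of two random EL images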
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Main program for Bugdroid."""
import endpoints
import logging
import os
import cloudstorage as gcs
from endpoints import ResourceContainer
from google.appengine.api import app_identity
from protorpc import messages
from protorpc import message_types
from protorpc import remote
ALLOWED_CLIENT_IDS = [
endpoints.API_EXPLORER_CLIENT_ID,
'768213250012.apps.googleusercontent.com']
ENTITY_KEY = 'bugdroid_data'
class BugdroidData(messages.Message):
"""Collection of repo data files."""
data_files = messages.StringField(1)
DATA_UPDATE_REQUEST_RESOURCE_CONTAINER = ResourceContainer(
BugdroidData,
)
@endpoints.api(name='bugdroid', version='v1',
description='bugdroid API to manage data configs.',
allowed_client_ids=ALLOWED_CLIENT_IDS)
class BugdroidApi(remote.Service): # pragma: no cover
@endpoints.method(
message_types.VoidMessage,
BugdroidData,
path='data',
http_method='GET',
name='data.get')
def data_get(self, _):
bucket_name = app_identity.get_default_gcs_bucket_name()
object_path = '/' + bucket_name + '/' + ENTITY_KEY
data_files = None
with gcs.open(object_path) as f:
data_files = f.read()
data_files = data_files.decode('utf-8')
if data_files:
return BugdroidData(data_files=data_files)
else:
raise endpoints.NotFoundException()
@endpoints.method(
DATA_UPDATE_REQUEST_RESOURCE_CONTAINER,
message_types.VoidMessage,
path='data',
http_method='POST',
name='data.update')
def data_update(self, request):
bucket_name = app_identity.get_default_gcs_bucket_name()
object_path = '/' + bucket_name + '/' + ENTITY_KEY
with gcs.open(object_path, 'w') as f:
f.write(request.data_files.encode('utf-8') )
return message_types.VoidMessage()
endpoint_list = endpoints.api_server([BugdroidApi])
|
import pytz
import re
from datetime import timedelta
from . import util
class RunAt:
MINUTE_PATTERN = "(\d\d?):(\d\d?)"
HOUR_PATTERN = "(\d\d?):(\d\d?):(\d\d?)"
WEEKDAY_PATTERN = "(...) %s" % HOUR_PATTERN
PATTERNS = [MINUTE_PATTERN, HOUR_PATTERN, WEEKDAY_PATTERN]
WEEKDAY_MAP = {
"mon": 0,
"tue": 1,
"wed": 2,
"thu": 3,
"fri": 4,
"sat": 5,
"sun": 6
}
def __init__(self, period, at, now, preferred_timezone, offset=None):
self.period = period
self.at = at
self.offset = offset
self.now = now
self.preferred_timezone = preferred_timezone
self.minute = None
self.hour = None
self.wday = None
def parse(self):
# weekday pattern
regex = re.compile(self.WEEKDAY_PATTERN)
match = re.match(regex, self.at)
if match:
self.wday = self.WEEKDAY_MAP.get(match.group(1).lower(), 0)
self.hour = int(match.group(2))
self.minute = int(match.group(3))
return True
# hour pattern
regex = re.compile(self.HOUR_PATTERN)
match = re.match(regex, self.at)
if match:
self.hour = int(match.group(1))
self.minute = int(match.group(2))
return True
# minute pattern
regex = re.compile(self.MINUTE_PATTERN)
match = re.match(regex, self.at)
if match:
self.minute = int(match.group(1))
return True
def last_at(self):
self.offset = self.now - timedelta(seconds=self.period)
last_ran = self.next_at()
# force last ran to be in the past
attempts = 0
while last_ran >= self.now:
last_ran = last_ran - timedelta(seconds=self.period)
attempts += 1
if attempts > 10:
break
return last_ran
def next_at(self):
run_at = self.offset + timedelta(seconds=self.period)
if not self.at:
return run_at
matched = self.parse()
if not matched:
return self.offset
run_at_in_preferred_timezone = run_at
if self.preferred_timezone != pytz.utc:
run_at_in_preferred_timezone = run_at.astimezone(self.preferred_timezone)
if self.minute is not None:
run_at_in_preferred_timezone = run_at_in_preferred_timezone.replace(minute=self.minute)
if self.hour is not None:
run_at_in_preferred_timezone = run_at_in_preferred_timezone.replace(hour=self.hour)
if self.wday is not None:
run_at_in_preferred_timezone = util.next_weekday(self.wday, run_at_in_preferred_timezone)
run_at = run_at_in_preferred_timezone.astimezone(pytz.utc)
return run_at
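# Usage sketch (illustrative; the datetime import and argument values are assumptions):
#   from datetime import datetime
#   now = datetime.now(pytz.utc)
#   job = RunAt(period=3600, at="30:00", now=now, preferred_timezone=pytz.utc, offset=now)
#   job.next_at()   # one period ahead of `offset`, snapped to minute 30
#   job.last_at()   # most recent such time strictly before `now`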
|
from dateutil.tz import tzlocal
import stream
import time
from stream.exceptions import ApiKeyException, InputException
import random
import jwt
try:
from unittest.case import TestCase
except ImportError:
from unittest import TestCase
import json
import os
import sys
import datetime
import datetime as dt
import copy
import requests
from stream import serializer
from requests.exceptions import MissingSchema
from itertools import count
from uuid import uuid1
from uuid import uuid4
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
def connect_debug():
try:
key = os.environ["STREAM_KEY"]
secret = os.environ["STREAM_SECRET"]
except KeyError:
print(
"To run the tests the STREAM_KEY and STREAM_SECRET variables "
"need to be available. \n"
"Please create a pull request if you are an external "
"contributor, because these variables are automatically added "
"by Travis."
)
sys.exit(1)
return stream.connect(key, secret, location="qa", timeout=30)
client = connect_debug()
counter = count()
test_identifier = uuid4()
def get_unique_postfix():
return "---test_%s-feed_%s" % (test_identifier, next(counter))
def getfeed(feed_slug, user_id):
"""
Adds the random postfix to the user id
"""
return client.feed(feed_slug, user_id + get_unique_postfix())
def api_request_parse_validator(test):
def wrapper(meth):
def _parse_response(*args, **kwargs):
response = meth(*args, **kwargs)
test.assertTrue("duration" in response)
return response
return _parse_response
return wrapper
class ClientTest(TestCase):
def setUp(self):
client._parse_response = api_request_parse_validator(self)(
client._parse_response
)
# DEBUG account details
user1 = getfeed("user", "1")
user2 = getfeed("user", "2")
aggregated2 = getfeed("aggregated", "2")
aggregated3 = getfeed("aggregated", "3")
topic1 = getfeed("topic", "1")
flat3 = getfeed("flat", "3")
self.c = client
self.user1 = user1
self.user2 = user2
self.aggregated2 = aggregated2
self.aggregated3 = aggregated3
self.topic1 = topic1
self.flat3 = flat3
self.local_tests = False
if "LOCAL" in os.environ:
self.local_tests = os.environ["LOCAL"]
def _test_sleep(self, production_wait, local_wait):
"""
when testing against a live API, sometimes we need a small sleep to
ensure data stability, however when testing locally the wait does
not need to be as long
:param production_wait: float, number of seconds to sleep when hitting real API
:param local_wait: float, number of seconds to sleep when hitting localhost API
:return: None
"""
sleep_time = production_wait
if self.local_tests:
sleep_time = local_wait
time.sleep(sleep_time)
def test_collections_url(self):
feed_url = client.get_full_url(relative_url="meta/", service_name="api")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/meta/")
else:
self.assertEqual(
feed_url, "https://qa-api.stream-io-api.com/api/v1.0/meta/"
)
def test_personalization_url(self):
feed_url = client.get_full_url(
relative_url="recommended", service_name="personalization"
)
if self.local_tests:
self.assertEqual(
feed_url, "http://localhost:8000/personalization/v1.0/recommended"
)
else:
self.assertEqual(
feed_url,
"https://qa-personalization.stream-io-api.com/personalization/v1.0/recommended",
)
def test_api_url(self):
feed_url = client.get_full_url(service_name="api", relative_url="feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(
feed_url, "https://qa-api.stream-io-api.com/api/v1.0/feed/"
)
def test_collections_url_default(self):
client = stream.connect("key", "secret")
feed_url = client.get_full_url(relative_url="meta/", service_name="api")
if not self.local_tests:
self.assertEqual(feed_url, "https://api.stream-io-api.com/api/v1.0/meta/")
def test_personalization_url_default(self):
client = stream.connect("key", "secret")
feed_url = client.get_full_url(
relative_url="recommended", service_name="personalization"
)
if not self.local_tests:
self.assertEqual(
feed_url,
"https://personalization.stream-io-api.com/personalization/v1.0/recommended",
)
def test_api_url_default(self):
client = stream.connect("key", "secret")
feed_url = client.get_full_url(service_name="api", relative_url="feed/")
if not self.local_tests:
self.assertEqual(feed_url, "https://api.stream-io-api.com/api/v1.0/feed/")
def test_collections_url_location(self):
client = stream.connect("key", "secret", location="tokyo")
feed_url = client.get_full_url(relative_url="meta/", service_name="api")
if not self.local_tests:
self.assertEqual(
feed_url, "https://tokyo-api.stream-io-api.com/api/v1.0/meta/"
)
def test_personalization_url_location(self):
client = stream.connect("key", "secret", location="tokyo")
feed_url = client.get_full_url(
relative_url="recommended", service_name="personalization"
)
if not self.local_tests:
self.assertEqual(
feed_url,
"https://tokyo-personalization.stream-io-api.com/personalization/v1.0/recommended",
)
def test_api_url_location(self):
client = stream.connect("key", "secret", location="tokyo")
feed_url = client.get_full_url(service_name="api", relative_url="feed/")
if not self.local_tests:
self.assertEqual(
feed_url, "https://tokyo-api.stream-io-api.com/api/v1.0/feed/"
)
def test_update_activities_create(self):
activities = [
{
"actor": "user:1",
"verb": "do",
"object": "object:1",
"foreign_id": "object:1",
"time": datetime.datetime.utcnow().isoformat(),
}
]
self.c.update_activities(activities)
def test_update_activities_illegal_argument(self):
activities = dict()
def invalid_activities():
self.c.update_activities(activities)
self.assertRaises(TypeError, invalid_activities)
def test_update_activities_update(self):
activities = []
for i in range(0, 10):
activities.append(
{
"actor": "user:1",
"verb": "do",
"object": "object:%s" % i,
"foreign_id": "object:%s" % i,
"time": datetime.datetime.utcnow().isoformat(),
}
)
activities_created = self.user1.add_activities(activities)["activities"]
activities = copy.deepcopy(activities_created)
for activity in activities:
activity.pop("id")
activity["popularity"] = 100
self.c.update_activities(activities)
activities_updated = self.user1.get(limit=len(activities))["results"]
activities_updated.reverse()
for i, activity in enumerate(activities_updated):
self.assertEqual(activities_created[i].get("id"), activity.get("id"))
self.assertEqual(activity["popularity"], 100)
def test_heroku(self):
url = "https://thierry:pass@getstream.io/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "thierry")
self.assertEqual(client.api_secret, "pass")
self.assertEqual(client.app_id, "1")
def test_heroku_no_location(self):
url = "https://bvt88g4kvc63:twc5ywfste5bm2ngqkzs7ukxk3pn96yweghjrxcmcrarnt3j4dqj3tucbhym5wfd@stream-io-api.com/?app_id=669"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "bvt88g4kvc63")
self.assertEqual(
client.api_secret,
"twc5ywfste5bm2ngqkzs7ukxk3pn96yweghjrxcmcrarnt3j4dqj3tucbhym5wfd",
)
self.assertEqual(client.app_id, "669")
feed_url = client.get_full_url("api", "feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(feed_url, "https://api.stream-io-api.com/api/v1.0/feed/")
def test_heroku_location_compat(self):
url = "https://ahj2ndz7gsan:gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy@us-east.getstream.io/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "ahj2ndz7gsan")
self.assertEqual(
client.api_secret,
"gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy",
)
feed_url = client.get_full_url("api", "feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(
feed_url, "https://us-east-api.stream-io-api.com/api/v1.0/feed/"
)
self.assertEqual(client.app_id, "1")
def test_heroku_location(self):
url = "https://ahj2ndz7gsan:gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy@us-east.stream-io-api.com/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "ahj2ndz7gsan")
self.assertEqual(
client.api_secret,
"gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy",
)
feed_url = client.get_full_url("api", "feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(
feed_url, "https://us-east-api.stream-io-api.com/api/v1.0/feed/"
)
self.assertEqual(client.app_id, "1")
def test_heroku_overwrite(self):
url = "https://thierry:pass@getstream.io/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect("a", "b", "c")
self.assertEqual(client.api_key, "a")
self.assertEqual(client.api_secret, "b")
self.assertEqual(client.app_id, "c")
def test_location_support(self):
client = stream.connect("a", "b", "c", location="us-east")
full_location = "https://us-east-api.stream-io-api.com/api/v1.0/feed/"
if self.local_tests:
full_location = "http://localhost:8000/api/v1.0/feed/"
self.assertEqual(client.location, "us-east")
feed_url = client.get_full_url("api", "feed/")
self.assertEqual(feed_url, full_location)
# test a wrong location, can only work on non-local test running
if not self.local_tests:
client = stream.connect("a", "b", "c", location="nonexistant")
def get_feed():
client.feed("user", "1").get()
self.assertRaises(requests.exceptions.ConnectionError, get_feed)
def test_invalid_feed_values(self):
def invalid_feed_slug():
client.feed("user:", "1")
self.assertRaises(ValueError, invalid_feed_slug)
def invalid_user_id():
client.feed("user:", "1-a")
self.assertRaises(ValueError, invalid_user_id)
def invalid_follow_feed_slug():
self.user1.follow("user:", "1")
self.assertRaises(ValueError, invalid_follow_feed_slug)
def invalid_follow_user_id():
self.user1.follow("user", "1-:a")
self.assertRaises(ValueError, invalid_follow_user_id)
def test_token_retrieval(self):
self.user1.token
self.user1.get_readonly_token()
def test_user_session_token(self):
client = stream.connect(self.c.api_key, self.c.api_secret)
token = client.create_user_session_token("user")
payload = jwt.decode(token, self.c.api_secret)
self.assertEqual(payload["user_id"], "user")
token = client.create_user_session_token("user", client="python", testing=True)
payload = jwt.decode(token, self.c.api_secret)
self.assertEqual(payload["client"], "python")
self.assertEqual(payload["testing"], True)
def test_add_activity(self):
feed = getfeed("user", "py1")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
response = feed.add_activity(activity_data)
activity_id = response["id"]
activities = feed.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
def test_add_activity_to_inplace_change(self):
feed = getfeed("user", "py1")
team_feed = getfeed("user", "teamy")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_data["to"] = [team_feed.id]
feed.add_activity(activity_data)
self.assertEqual(activity_data["to"], [team_feed.id])
def test_add_activities_to_inplace_change(self):
feed = getfeed("user", "py1")
team_feed = getfeed("user", "teamy")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_data["to"] = [team_feed.id]
feed.add_activities([activity_data])
self.assertEqual(activity_data["to"], [team_feed.id])
def test_add_activity_to(self):
# test for sending an activity to the team feed using to
feeds = ["user", "teamy", "team_follower"]
user_feed, team_feed, team_follower_feed = map(
lambda x: getfeed("user", x), feeds
)
team_follower_feed.follow(team_feed.slug, team_feed.user_id)
activity_data = {"actor": 1, "verb": "tweet", "object": 1, "to": [team_feed.id]}
response = user_feed.add_activity(activity_data)
activity_id = response["id"]
# see if the new activity is also in the team feed
activities = team_feed.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
self.assertEqual(activities[0]["origin"], None)
# see if the fanout process also works
activities = team_follower_feed.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
self.assertEqual(activities[0]["origin"], team_feed.id)
# and validate removing also works
user_feed.remove_activity(response["id"])
# check the user pyto feed
activities = team_feed.get(limit=1)["results"]
self.assertFirstActivityIDNotEqual(activities, activity_id)
# and the flat feed
activities = team_follower_feed.get(limit=1)["results"]
self.assertFirstActivityIDNotEqual(activities, activity_id)
def test_add_activity_to_type_error(self):
user_feed = getfeed("user", "1")
activity_data = {"actor": 1, "verb": "tweet", "object": 1, "to": "string"}
self.assertRaises(TypeError, user_feed.add_activity, activity_data)
def assertFirstActivityIDEqual(self, activities, correct_activity_id):
activity_id = None
if activities:
activity_id = activities[0]["id"]
self.assertEqual(activity_id, correct_activity_id)
def assertFirstActivityIDNotEqual(self, activities, correct_activity_id):
activity_id = None
if activities:
activity_id = activities[0]["id"]
self.assertNotEqual(activity_id, correct_activity_id)
def test_remove_activity(self):
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_id = self.user1.add_activity(activity_data)["id"]
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 1)
self.user1.remove_activity(activity_id)
# verify that no activities were returned
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 0)
def test_remove_activity_by_foreign_id(self):
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "tweet:10",
}
self.user1.add_activity(activity_data)["id"]
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 1)
self.user1.remove_activity(foreign_id="tweet:10")
# verify that no activities were returned
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 0)
# verify this doesn't raise an error, but fails silently
self.user1.remove_activity(foreign_id="tweet:unknowandmissing")
def test_add_activities(self):
activity_data = [
{"actor": 1, "verb": "tweet", "object": 1},
{"actor": 2, "verb": "watch", "object": 2},
]
response = self.user1.add_activities(activity_data)
activity_ids = [a["id"] for a in response["activities"]]
activities = self.user1.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
def test_add_activities_to(self):
pyto2 = getfeed("user", "pyto2")
pyto3 = getfeed("user", "pyto3")
to = [pyto2.id, pyto3.id]
activity_data = [
{"actor": 1, "verb": "tweet", "object": 1, "to": to},
{"actor": 2, "verb": "watch", "object": 2, "to": to},
]
response = self.user1.add_activities(activity_data)
activity_ids = [a["id"] for a in response["activities"]]
activities = self.user1.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
# test first target
activities = pyto2.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
# test second target
activities = pyto3.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
def test_follow_and_source(self):
feed = getfeed("user", "test_follow")
agg_feed = getfeed("aggregated", "test_follow")
actor_id = random.randint(10, 100000)
activity_data = {"actor": actor_id, "verb": "tweet", "object": 1}
activity_id = feed.add_activity(activity_data)["id"]
agg_feed.follow(feed.slug, feed.user_id)
activities = agg_feed.get(limit=3)["results"]
activity = self._get_first_aggregated_activity(activities)
activity_id_found = activity["id"] if activity is not None else None
self.assertEqual(activity["origin"], feed.id)
self.assertEqual(activity_id_found, activity_id)
def test_follow_activity_copy_limit(self):
feed = getfeed("user", "test_follow_acl")
feed1 = getfeed("user", "test_follow_acl1")
actor_id = random.randint(10, 100000)
feed1.add_activity({"actor": actor_id, "verb": "tweet", "object": 1})
feed.follow(feed1.slug, feed1.user_id, activity_copy_limit=0)
activities = feed.get(limit=5)["results"]
self.assertEqual(len(activities), 0)
def test_follow_and_delete(self):
user_feed = getfeed("user", "test_follow")
agg_feed = getfeed("aggregated", "test_follow")
actor_id = random.randint(10, 100000)
activity_data = {"actor": actor_id, "verb": "tweet", "object": 1}
activity_id = user_feed.add_activity(activity_data)["id"]
agg_feed.follow(user_feed.slug, user_feed.user_id)
user_feed.remove_activity(activity_id)
activities = agg_feed.get(limit=3)["results"]
activity = self._get_first_aggregated_activity(activities)
activity_id_found = activity["id"] if activity is not None else None
self.assertNotEqual(activity_id_found, activity_id)
def test_flat_follow(self):
feed = getfeed("user", "test_flat_follow")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_id = feed.add_activity(activity_data)["id"]
self.flat3.follow(feed.slug, feed.user_id)
activities = self.flat3.get(limit=3)["results"]
activity = self._get_first_activity(activities)
activity_id_found = activity["id"] if activity is not None else None
self.assertEqual(activity_id_found, activity_id)
self.flat3.unfollow(feed.slug, feed.user_id)
activities = self.flat3.get(limit=3)["results"]
self.assertEqual(len(activities), 0)
def test_flat_follow_no_copy(self):
feed = getfeed("user", "test_flat_follow_no_copy")
follower = getfeed("flat", "test_flat_follow_no_copy")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
feed.add_activity(activity_data)["id"]
follower.follow(feed.slug, feed.user_id, activity_copy_limit=0)
activities = follower.get(limit=3)["results"]
self.assertEqual(activities, [])
def test_flat_follow_copy_one(self):
feed = getfeed("user", "test_flat_follow_copy_one")
follower = getfeed("flat", "test_flat_follow_copy_one")
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "test:1",
}
feed.add_activity(activity_data)["id"]
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "test:2",
}
feed.add_activity(activity_data)["id"]
follower.follow(feed.slug, feed.user_id, activity_copy_limit=1)
activities = follower.get(limit=3)["results"]
# verify we get the latest activity
self.assertEqual(activities[0]["foreign_id"], "test:2")
def _get_first_aggregated_activity(self, activities):
try:
return activities[0]["activities"][0]
except IndexError:
pass
def _get_first_activity(self, activities):
try:
return activities[0]
except IndexError:
pass
def test_empty_followings(self):
asocial = getfeed("user", "asocialpython")
followings = asocial.following()
self.assertEqual(followings["results"], [])
def test_get_followings(self):
social = getfeed("user", "psocial")
social.follow("user", "apy")
social.follow("user", "bpy")
social.follow("user", "cpy")
followings = social.following(offset=0, limit=2)
self.assertEqual(len(followings["results"]), 2)
self.assertEqual(followings["results"][0]["feed_id"], social.id)
self.assertEqual(followings["results"][0]["target_id"], "user:cpy")
followings = social.following(offset=1, limit=2)
self.assertEqual(len(followings["results"]), 2)
self.assertEqual(followings["results"][0]["feed_id"], social.id)
self.assertEqual(followings["results"][0]["target_id"], "user:bpy")
def test_empty_followers(self):
asocial = getfeed("user", "asocialpython")
followers = asocial.followers()
self.assertEqual(len(followers["results"]), 0)
self.assertEqual(followers["results"], [])
def test_get_followers(self):
social = getfeed("user", "psocial")
spammy1 = getfeed("user", "spammy1")
spammy2 = getfeed("user", "spammy2")
spammy3 = getfeed("user", "spammy3")
for feed in [spammy1, spammy2, spammy3]:
feed.follow("user", social.user_id)
followers = social.followers(offset=0, limit=2)
self.assertEqual(len(followers["results"]), 2)
self.assertEqual(followers["results"][0]["feed_id"], spammy3.id)
self.assertEqual(followers["results"][0]["target_id"], social.id)
followers = social.followers(offset=1, limit=2)
self.assertEqual(len(followers["results"]), 2)
self.assertEqual(followers["results"][0]["feed_id"], spammy2.id)
self.assertEqual(followers["results"][0]["target_id"], social.id)
def test_empty_do_i_follow(self):
social = getfeed("user", "psocial")
social.follow("user", "apy")
social.follow("user", "bpy")
followings = social.following(feeds=["user:missingpy"])
self.assertEqual(len(followings["results"]), 0)
self.assertEqual(followings["results"], [])
def test_do_i_follow(self):
social = getfeed("user", "psocial")
social.follow("user", "apy")
social.follow("user", "bpy")
followings = social.following(feeds=["user:apy"])
self.assertEqual(len(followings["results"]), 1)
self.assertEqual(followings["results"][0]["feed_id"], social.id)
self.assertEqual(followings["results"][0]["target_id"], "user:apy")
def test_update_activity_to_targets(self):
time = datetime.datetime.utcnow().isoformat()
foreign_id = "user:1"
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": foreign_id,
"time": time,
}
activity_data["to"] = ["user:1", "user:2"]
self.user1.add_activity(activity_data)
ret = self.user1.update_activity_to_targets(
foreign_id, time, new_targets=["user:3", "user:2"]
)
self.assertEqual(len(ret["activity"]["to"]), 2)
self.assertTrue("user:2" in ret["activity"]["to"])
self.assertTrue("user:3" in ret["activity"]["to"])
ret = self.user1.update_activity_to_targets(
foreign_id,
time,
added_targets=["user:4", "user:5"],
removed_targets=["user:3"],
)
self.assertEqual(len(ret["activity"]["to"]), 3)
self.assertTrue("user:2" in ret["activity"]["to"])
self.assertTrue("user:4" in ret["activity"]["to"])
self.assertTrue("user:5" in ret["activity"]["to"])
def test_get(self):
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_id = self.user1.add_activity(activity_data)["id"]
activity_data = {"actor": 2, "verb": "add", "object": 2}
activity_id_two = self.user1.add_activity(activity_data)["id"]
activity_data = {"actor": 3, "verb": "watch", "object": 2}
activity_id_three = self.user1.add_activity(activity_data)["id"]
activities = self.user1.get(limit=2)["results"]
# verify the first two results
self.assertEqual(len(activities), 2)
self.assertEqual(activities[0]["id"], activity_id_three)
self.assertEqual(activities[1]["id"], activity_id_two)
# try offset based
activities = self.user1.get(limit=2, offset=1)["results"]
self.assertEqual(activities[0]["id"], activity_id_two)
# try id_lt based
activities = self.user1.get(limit=2, id_lt=activity_id_two)["results"]
self.assertEqual(activities[0]["id"], activity_id)
def test_get_not_marked_seen(self):
notification_feed = getfeed("notification", "test_mark_seen")
activities = notification_feed.get(limit=3)["results"]
for activity in activities:
self.assertFalse(activity["is_seen"])
def test_mark_seen_on_get(self):
notification_feed = getfeed("notification", "test_mark_seen")
activities = notification_feed.get(limit=100)["results"]
for activity in activities:
notification_feed.remove_activity(activity["id"])
old_activities = [
notification_feed.add_activity({"actor": 1, "verb": "tweet", "object": 1}),
notification_feed.add_activity({"actor": 2, "verb": "add", "object": 2}),
notification_feed.add_activity({"actor": 3, "verb": "watch", "object": 3}),
]
notification_feed.get(
mark_seen=[old_activities[0]["id"], old_activities[1]["id"]]
)
activities = notification_feed.get(limit=3)["results"]
# is the seen state correct
for activity in activities:
# using a loop in case we're retrieving activities in a different order than old_activities
if old_activities[0]["id"] == activity["id"]:
self.assertTrue(activity["is_seen"])
if old_activities[1]["id"] == activity["id"]:
self.assertTrue(activity["is_seen"])
if old_activities[2]["id"] == activity["id"]:
self.assertFalse(activity["is_seen"])
# see if the state properly resets after we add another activity
notification_feed.add_activity(
{"actor": 3, "verb": "watch", "object": 3}
) # ['id']
activities = notification_feed.get(limit=3)["results"]
self.assertFalse(activities[0]["is_seen"])
self.assertEqual(len(activities[0]["activities"]), 2)
def test_mark_read_by_id(self):
notification_feed = getfeed("notification", "py2")
activities = notification_feed.get(limit=3)["results"]
ids = []
for activity in activities:
ids.append(activity["id"])
self.assertFalse(activity["is_read"])
ids = ids[:2]
notification_feed.get(mark_read=ids)
activities = notification_feed.get(limit=3)["results"]
for activity in activities:
if activity["id"] in ids:
self.assertTrue(activity["is_read"])
self.assertFalse(activity["is_seen"])
def test_api_key_exception(self):
self.c = stream.connect(
"5crf3bhfzesnMISSING",
"tfq2sdqpj9g446sbv653x3aqmgn33hsn8uzdc9jpskaw8mj6vsnhzswuwptuj9su",
)
self.user1 = self.c.feed("user", "1")
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"debug_example_undefined": "test",
}
self.assertRaises(
ApiKeyException, lambda: self.user1.add_activity(activity_data)
)
def test_complex_field(self):
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"participants": ["Tommaso", "Thierry"],
}
response = self.user1.add_activity(activity_data)
activity_id = response["id"]
activities = self.user1.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
self.assertEqual(activities[0]["participants"], ["Tommaso", "Thierry"])
def assertDatetimeAlmostEqual(self, a, b):
difference = abs(a - b)
if difference > datetime.timedelta(milliseconds=1):
self.assertEqual(a, b)
def assertClearlyNotEqual(self, a, b):
difference = abs(a - b)
if difference < datetime.timedelta(milliseconds=1):
raise ValueError("the dates are too close")
def test_uniqueness(self):
"""
        In order for things to be considered unique they need:
        a.) The same time and activity data, or
b.) The same time and foreign id
"""
utcnow = datetime.datetime.utcnow()
activity_data = {"actor": 1, "verb": "tweet", "object": 1, "time": utcnow}
self.user1.add_activity(activity_data)
self.user1.add_activity(activity_data)
activities = self.user1.get(limit=2)["results"]
self.assertDatetimeAlmostEqual(activities[0]["time"], utcnow)
if len(activities) > 1:
self.assertClearlyNotEqual(activities[1]["time"], utcnow)
def test_uniqueness_topic(self):
"""
        In order for things to be considered unique they need:
a.) The same time and activity data, or
b.) The same time and foreign id
"""
# follow both the topic and the user
self.flat3.follow("topic", self.topic1.user_id)
self.flat3.follow("user", self.user1.user_id)
# add the same activity twice
now = datetime.datetime.now(tzlocal())
tweet = "My Way %s" % get_unique_postfix()
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"time": now,
"tweet": tweet,
}
self.topic1.add_activity(activity_data)
self.user1.add_activity(activity_data)
# verify that flat3 contains the activity exactly once
response = self.flat3.get(limit=3)
activity_tweets = [a.get("tweet") for a in response["results"]]
self.assertEqual(activity_tweets.count(tweet), 1)
def test_uniqueness_foreign_id(self):
now = datetime.datetime.now(tzlocal())
utcnow = (now - now.utcoffset()).replace(tzinfo=None)
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "tweet:11",
"time": utcnow,
}
self.user1.add_activity(activity_data)
activity_data = {
"actor": 2,
"verb": "tweet",
"object": 3,
"foreign_id": "tweet:11",
"time": utcnow,
}
self.user1.add_activity(activity_data)
activities = self.user1.get(limit=10)["results"]
        # the second post should have overwritten the first one (because they
        # had the same foreign id and time)
self.assertEqual(len(activities), 1)
self.assertEqual(activities[0]["object"], "3")
self.assertEqual(activities[0]["foreign_id"], "tweet:11")
self.assertDatetimeAlmostEqual(activities[0]["time"], utcnow)
def test_time_ordering(self):
"""
        datetime.datetime.utcnow() is our recommended approach,
        so if we add an activity, then add one with an explicit time,
        and then add another activity, each should end up in the right spot
"""
# timedelta is used to "make sure" that ordering is known even though
# server time is not
custom_time = datetime.datetime.utcnow() - dt.timedelta(days=1)
feed = self.user2
for index, activity_time in enumerate([None, custom_time, None]):
self._test_sleep(1, 1) # so times are a bit different
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "tweet:%s" % index,
"time": activity_time,
}
feed.add_activity(activity_data)
activities = feed.get(limit=3)["results"]
        # the newest activity (no explicit time) should come first and the one
        # with the custom (older) time should come last
self.assertEqual(activities[0]["foreign_id"], "tweet:2")
self.assertEqual(activities[1]["foreign_id"], "tweet:0")
self.assertEqual(activities[2]["foreign_id"], "tweet:1")
self.assertDatetimeAlmostEqual(activities[2]["time"], custom_time)
def test_missing_actor(self):
activity_data = {
"verb": "tweet",
"object": 1,
"debug_example_undefined": "test",
}
doit = lambda: self.user1.add_activity(activity_data)
try:
doit()
raise ValueError("should have raised InputException")
except InputException:
pass
def test_wrong_feed_spec(self):
self.c = stream.connect(
"5crf3bhfzesnMISSING",
"tfq2sdqpj9g446sbv653x3aqmgn33hsn8uzdc9jpskaw8mj6vsnhzswuwptuj9su",
)
self.assertRaises(TypeError, lambda: getfeed("user1"))
def test_serialization(self):
today = datetime.date.today()
then = datetime.datetime.now().replace(microsecond=0)
now = datetime.datetime.now()
data = dict(
string="string",
float=0.1,
int=1,
date=today,
datetime=now,
datetimenomicro=then,
)
serialized = serializer.dumps(data)
loaded = serializer.loads(serialized)
self.assertEqual(data, loaded)
def test_follow_many(self):
sources = [getfeed("user", str(i)).id for i in range(10)]
targets = [getfeed("flat", str(i)).id for i in range(10)]
feeds = [{"source": s, "target": t} for s, t in zip(sources, targets)]
self.c.follow_many(feeds)
for target in targets:
follows = self.c.feed(*target.split(":")).followers()["results"]
self.assertEqual(len(follows), 1)
self.assertTrue(follows[0]["feed_id"] in sources)
self.assertEqual(follows[0]["target_id"], target)
for source in sources:
follows = self.c.feed(*source.split(":")).following()["results"]
self.assertEqual(len(follows), 1)
self.assertEqual(follows[0]["feed_id"], source)
self.assertTrue(follows[0]["target_id"] in targets)
def test_follow_many_acl(self):
sources = [getfeed("user", str(i)) for i in range(10)]
# ensure every source is empty first
for feed in sources:
activities = feed.get(limit=100)["results"]
for activity in activities:
feed.remove_activity(activity["id"])
targets = [getfeed("flat", str(i)) for i in range(10)]
# ensure every source is empty first
for feed in targets:
activities = feed.get(limit=100)["results"]
for activity in activities:
feed.remove_activity(activity["id"])
# add activity to each target feed
activity = {
"actor": "barry",
"object": "09",
"verb": "tweet",
"time": datetime.datetime.utcnow().isoformat(),
}
for feed in targets:
feed.add_activity(activity)
self.assertEqual(len(feed.get(limit=5)["results"]), 1)
sources_id = [feed.id for feed in sources]
targets_id = [target.id for target in targets]
feeds = [{"source": s, "target": t} for s, t in zip(sources_id, targets_id)]
self.c.follow_many(feeds, activity_copy_limit=0)
for feed in sources:
activities = feed.get(limit=5)["results"]
self.assertEqual(len(activities), 0)
def test_add_to_many(self):
activity = {"actor": 1, "verb": "tweet", "object": 1, "custom": "data"}
feeds = [getfeed("flat", str(i)).id for i in range(10, 20)]
self.c.add_to_many(activity, feeds)
for feed in feeds:
feed = self.c.feed(*feed.split(":"))
self.assertEqual(feed.get()["results"][0]["custom"], "data")
def test_create_email_redirect(self):
target_url = "http://google.com/?a=b&c=d"
user_id = "tommaso"
impression = {
"foreign_ids": ["tweet:1", "tweet:2", "tweet:3", "tweet:4", "tweet:5"],
"feed_id": "user:global",
"user_id": user_id,
"location": "email",
}
engagement = {
"user_id": user_id,
"label": "click",
"feed_id": "user:global",
"location": "email",
"position": 3,
"foreign_id": "tweet:1",
}
events = [impression, engagement]
redirect_url = self.c.create_redirect_url(target_url, user_id, events)
parsed_url = urlparse(redirect_url)
qs = parse_qs(parsed_url.query)
decoded = jwt.decode(qs["authorization"][0], self.c.api_secret)
self.assertEqual(
decoded,
{
"resource": "redirect_and_track",
"action": "*",
"feed_id": "*",
"user_id": "tommaso",
},
)
expected_params = {
"auth_type": "jwt",
"url": target_url,
"api_key": self.c.api_key,
}
for k, v in expected_params.items():
self.assertEqual(qs[k][0], v)
self.assertEqual(json.loads(qs["events"][0]), events)
def test_email_redirect_invalid_target(self):
engagement = {
"foreign_id": "tweet:1",
"label": "click",
"position": 3,
"user_id": "tommaso",
"location": "email",
"feed_id": "user:global",
}
impression = {
"foreign_ids": ["tweet:1", "tweet:2", "tweet:3", "tweet:4", "tweet:5"],
"user_id": "tommaso",
"location": "email",
"feed_id": "user:global",
}
events = [impression, engagement]
# no protocol specified, this should raise an error
target_url = "google.com"
user_id = "tommaso"
create_redirect = lambda: self.c.create_redirect_url(
target_url, user_id, events
)
self.assertRaises(MissingSchema, create_redirect)
def test_follow_redirect_url(self):
target_url = "http://google.com/?a=b&c=d"
events = []
user_id = "tommaso"
redirect_url = self.c.create_redirect_url(target_url, user_id, events)
res = requests.get(redirect_url)
res.raise_for_status()
self.assertTrue("google" in res.url)
def test_get_activities_empty_ids(self):
response = self.c.get_activities(ids=[str(uuid1())])
self.assertEqual(len(response["results"]), 0)
def test_get_activities_empty_foreign_ids(self):
response = self.c.get_activities(
foreign_id_times=[("fid-x", datetime.datetime.utcnow())]
)
self.assertEqual(len(response["results"]), 0)
def test_get_activities_full(self):
dt = datetime.datetime.utcnow()
fid = "awesome-test"
activity = {
"actor": "barry",
"object": "09",
"verb": "tweet",
"time": dt,
"foreign_id": fid,
}
feed = getfeed("user", "test_get_activity")
response = feed.add_activity(activity)
response = self.c.get_activities(ids=[response["id"]])
self.assertEqual(len(response["results"]), 1)
self.assertEqual(activity["foreign_id"], response["results"][0]["foreign_id"])
response = self.c.get_activities(foreign_id_times=[(fid, dt)])
self.assertEqual(len(response["results"]), 1)
self.assertEqual(activity["foreign_id"], response["results"][0]["foreign_id"])
def test_activity_partial_update(self):
now = datetime.datetime.utcnow()
feed = self.c.feed("user", uuid4())
feed.add_activity(
{
"actor": "barry",
"object": "09",
"verb": "tweet",
"time": now,
"foreign_id": "fid:123",
"product": {"name": "shoes", "price": 9.99, "color": "blue"},
}
)
activity = feed.get()["results"][0]
set = {
"product.name": "boots",
"product.price": 7.99,
"popularity": 1000,
"foo": {"bar": {"baz": "qux"}},
}
unset = ["product.color"]
# partial update by ID
self.c.activity_partial_update(id=activity["id"], set=set, unset=unset)
updated = feed.get()["results"][0]
expected = activity
expected["product"] = {"name": "boots", "price": 7.99}
expected["popularity"] = 1000
expected["foo"] = {"bar": {"baz": "qux"}}
self.assertEqual(updated, expected)
# partial update by foreign ID + time
set = {"foo.bar.baz": 42, "popularity": 9000}
unset = ["product.price"]
self.c.activity_partial_update(
foreign_id=activity["foreign_id"],
time=activity["time"],
set=set,
unset=unset,
)
updated = feed.get()["results"][0]
expected["product"] = {"name": "boots"}
expected["foo"] = {"bar": {"baz": 42}}
expected["popularity"] = 9000
self.assertEqual(updated, expected)
def test_create_reference(self):
ref = self.c.collections.create_reference("item", "42")
self.assertEqual(ref, "SO:item:42")
def test_create_user_reference(self):
ref = self.c.users.create_reference("42")
self.assertEqual(ref, "SU:42")
def test_reaction_add(self):
self.c.reactions.add("like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike")
def test_reaction_get(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
reaction = self.c.reactions.get(response["id"])
self.assertEqual(reaction["parent"], "")
self.assertEqual(reaction["data"], {})
self.assertEqual(reaction["latest_children"], {})
self.assertEqual(reaction["children_counts"], {})
self.assertEqual(
reaction["activity_id"], "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4"
)
self.assertEqual(reaction["kind"], "like")
self.assertTrue("created_at" in reaction)
self.assertTrue("updated_at" in reaction)
self.assertTrue("id" in reaction)
def test_reaction_update(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
self.c.reactions.update(response["id"], {"changed": True})
def test_reaction_delete(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
self.c.reactions.delete(response["id"])
def test_reaction_add_child(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
self.c.reactions.add_child("like", response["id"], "rob")
def test_reaction_filter_random(self):
self.c.reactions.filter(
kind="like",
reaction_id="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
id_lte="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
)
self.c.reactions.filter(
activity_id="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
id_lte="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
)
self.c.reactions.filter(
user_id="mike", id_lte="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4"
)
def _first_result_should_be(self, response, element):
el = element.copy()
el.pop("duration")
self.assertEqual(len(response["results"]), 1)
self.assertEqual(response["results"][0], el)
def test_reaction_filter(self):
activity_id = str(uuid1())
user = str(uuid1())
response = self.c.reactions.add("like", activity_id, user)
child = self.c.reactions.add_child("like", response["id"], user)
reaction = self.c.reactions.get(response["id"])
response = self.c.reactions.add("comment", activity_id, user)
reaction_comment = self.c.reactions.get(response["id"])
r = self.c.reactions.filter(reaction_id=reaction["id"])
self._first_result_should_be(r, child)
r = self.c.reactions.filter(kind="like", activity_id=activity_id, id_lte=reaction["id"])
self._first_result_should_be(r, reaction)
r = self.c.reactions.filter(kind="like", user_id=user, id_lte=reaction["id"])
self._first_result_should_be(r, reaction)
r = self.c.reactions.filter(kind="comment", activity_id=activity_id)
self._first_result_should_be(r, reaction_comment)
def test_user_add(self):
self.c.users.add(str(uuid1()))
def test_user_add_get_or_create(self):
user_id = str(uuid1())
r1 = self.c.users.add(user_id)
r2 = self.c.users.add(user_id, get_or_create=True)
self.assertEqual(r1["id"], r2["id"])
self.assertEqual(r1["created_at"], r2["created_at"])
self.assertEqual(r1["updated_at"], r2["updated_at"])
def test_user_get(self):
response = self.c.users.add(str(uuid1()))
user = self.c.users.get(response["id"])
self.assertEqual(user["data"], {})
self.assertTrue("created_at" in user)
self.assertTrue("updated_at" in user)
self.assertTrue("id" in user)
def test_user_update(self):
response = self.c.users.add(str(uuid1()))
self.c.users.update(response["id"], {"changed": True})
def test_user_delete(self):
response = self.c.users.add(str(uuid1()))
self.c.users.delete(response["id"])
def test_collections_add(self):
self.c.collections.add("items", {"data": 1}, id=str(uuid1()), user_id="tom")
def test_collections_add_no_id(self):
self.c.collections.add("items", {"data": 1})
def test_collections_get(self):
response = self.c.collections.add("items", {"data": 1}, id=str(uuid1()))
entry = self.c.collections.get("items", response["id"])
self.assertEqual(entry["data"], {"data": 1})
self.assertTrue("created_at" in entry)
self.assertTrue("updated_at" in entry)
self.assertTrue("id" in entry)
def test_collections_update(self):
response = self.c.collections.add("items", {"data": 1}, str(uuid1()))
self.c.collections.update("items", response["id"], data={"changed": True})
entry = self.c.collections.get("items", response["id"])
self.assertEqual(entry["data"], {"changed": True})
def test_collections_delete(self):
response = self.c.collections.add("items", {"data": 1}, str(uuid1()))
self.c.collections.delete("items", response["id"])
def test_feed_enrichment_collection(self):
entry = self.c.collections.add("items", {"name": "time machine"})
entry.pop("duration")
f = getfeed("user", "mike")
activity_data = {
"actor": "mike",
"verb": "buy",
"object": self.c.collections.create_reference(entry=entry),
}
f.add_activity(activity_data)
response = f.get()
self.assertTrue(
set(activity_data.items()).issubset(set(response["results"][0].items()))
)
enriched_response = f.get(enrich=True)
self.assertEqual(enriched_response["results"][0]["object"], entry)
def test_feed_enrichment_user(self):
user = self.c.users.add(str(uuid1()), {"name": "Mike"})
user.pop("duration")
f = getfeed("user", "mike")
activity_data = {
"actor": self.c.users.create_reference(user),
"verb": "buy",
"object": "time machine",
}
f.add_activity(activity_data)
response = f.get()
self.assertTrue(
set(activity_data.items()).issubset(set(response["results"][0].items()))
)
enriched_response = f.get(enrich=True)
self.assertEqual(enriched_response["results"][0]["actor"], user)
def test_feed_enrichment_own_reaction(self):
f = getfeed("user", "mike")
activity_data = {"actor": "mike", "verb": "buy", "object": "object"}
response = f.add_activity(activity_data)
reaction = self.c.reactions.add("like", response["id"], "mike")
reaction.pop("duration")
enriched_response = f.get(reactions={"own": True}, user_id="mike")
self.assertEqual(
enriched_response["results"][0]["own_reactions"]["like"][0], reaction
)
def test_feed_enrichment_recent_reaction(self):
f = getfeed("user", "mike")
activity_data = {"actor": "mike", "verb": "buy", "object": "object"}
response = f.add_activity(activity_data)
reaction = self.c.reactions.add("like", response["id"], "mike")
reaction.pop("duration")
enriched_response = f.get(reactions={"recent": True})
self.assertEqual(
enriched_response["results"][0]["latest_reactions"]["like"][0], reaction
)
def test_feed_enrichment_reaction_counts(self):
f = getfeed("user", "mike")
activity_data = {"actor": "mike", "verb": "buy", "object": "object"}
response = f.add_activity(activity_data)
reaction = self.c.reactions.add("like", response["id"], "mike")
reaction.pop("duration")
enriched_response = f.get(reactions={"counts": True})
self.assertEqual(enriched_response["results"][0]["reaction_counts"]["like"], 1)
|
import numpy as np
class ExperienceReplay:
def __init__(self,
num_frame_stack=4,
capacity=int(1e5),
pic_size=(96, 96)
):
self.num_frame_stack = num_frame_stack
self.capacity = capacity
self.pic_size = pic_size
self.counter = 0
self.frame_window = None
self.init_caches()
self.expecting_new_episode = True
def add_experience(self, frame, action, done, reward):
assert self.frame_window is not None, "start episode first"
self.counter += 1
frame_idx = self.counter % self.max_frame_cache
exp_idx = (self.counter - 1) % self.capacity
self.prev_states[exp_idx] = self.frame_window
self.frame_window = np.append(self.frame_window[1:], frame_idx)
self.next_states[exp_idx] = self.frame_window
self.actions[exp_idx] = action
self.is_done[exp_idx] = done
self.frames[frame_idx] = frame
self.rewards[exp_idx] = reward
if done:
self.expecting_new_episode = True
def start_new_episode(self, frame):
# it should be okay not to increment counter here
# because episode ending frames are not used
assert self.expecting_new_episode, "previous episode didn't end yet"
frame_idx = self.counter % self.max_frame_cache
self.frame_window = np.repeat(frame_idx, self.num_frame_stack)
self.frames[frame_idx] = frame
self.expecting_new_episode = False
def sample_mini_batch(self, n):
count = min(self.capacity, self.counter)
batchidx = np.random.randint(count, size=n)
prev_frames = self.frames[self.prev_states[batchidx]]
next_frames = self.frames[self.next_states[batchidx]]
prev_frames = np.moveaxis(prev_frames, 1, -1)
next_frames = np.moveaxis(next_frames, 1, -1)
return {
"reward": self.rewards[batchidx],
"prev_state": prev_frames,
"next_state": next_frames,
"actions": self.actions[batchidx],
"done_mask": self.is_done[batchidx]
}
def current_state(self):
        # assert not self.expecting_new_episode, "start new episode first"
assert self.frame_window is not None, "do something first"
sf = self.frames[self.frame_window]
sf = np.moveaxis(sf, 0, -1)
return sf
def init_caches(self):
self.rewards = np.zeros(self.capacity, dtype="float32")
self.prev_states = -np.ones((self.capacity, self.num_frame_stack),
dtype="int32")
self.next_states = -np.ones((self.capacity, self.num_frame_stack),
dtype="int32")
self.is_done = -np.ones(self.capacity, "int32")
self.actions = -np.ones(self.capacity, dtype="int32")
self.max_frame_cache = self.capacity + 2 * self.num_frame_stack + 1
self.frames = -np.ones((self.max_frame_cache,) + self.pic_size, dtype="float32")
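# --- Usage sketch (added for illustration; not part of the original class) ---
# Shows the intended call order: start_new_episode() before add_experience(),
# then sample_mini_batch(). The dummy frames, action values and step count
# below are hypothetical placeholders.
if __name__ == "__main__":
    replay = ExperienceReplay(num_frame_stack=4, capacity=1000, pic_size=(96, 96))
    dummy_frame = np.zeros((96, 96), dtype="float32")
    replay.start_new_episode(dummy_frame)
    for step in range(10):
        replay.add_experience(dummy_frame, action=0, done=(step == 9), reward=1.0)
    batch = replay.sample_mini_batch(4)
    # stacked frames end up on the last axis after np.moveaxis
    print(batch["prev_state"].shape)  # (4, 96, 96, 4)
    print(batch["reward"].shape)      # (4,)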
|
import velocity_obstacle.velocity_obstacle as velocity_obstacle
import nmpc.nmpc as nmpc
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-m", "--mode", help="mode of obstacle avoidance; options: velocity_obstacle, or nmpc")
parser.add_argument(
"-f", "--filename", help="filename, in case you want to save the animation")
# args = parser.parse_args()
# print(args)
# if args.mode == "velocity_obstacle":
# velocity_obstacle.simulate(args.filename)
# elif args.mode == "nmpc":
# nmpc.simulate(args.filename)
# else:
# print("Please enter mode the desired mode: velocity_obstacle or nmpc")
velocity_obstacle.simulate('velocity_obstacle/velocity_obstacle.avi')
|
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from torchvision import models
from mmedit.models import ImgNormalize
from mmedit.models.registry import COMPONENTS
from mmedit.utils import get_root_logger
@COMPONENTS.register_module()
class LTE(nn.Module):
"""Learnable Texture Extractor
Based on pretrained VGG19. Generate features in 3 levels.
Args:
requires_grad (bool): Require grad or not. Default: True.
        pixel_range (float): Pixel range of feature. Default: 1.
pretrained (str): Path for pretrained model. Default: None.
"""
def __init__(self, requires_grad=True, pixel_range=1., pretrained=None):
super().__init__()
vgg_mean = (0.485, 0.456, 0.406)
vgg_std = (0.229 * pixel_range, 0.224 * pixel_range,
0.225 * pixel_range)
self.img_normalize = ImgNormalize(
pixel_range=pixel_range, img_mean=vgg_mean, img_std=vgg_std)
# use vgg19 weights to initialize
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.slice1.parameters():
param.requires_grad = requires_grad
for param in self.slice2.parameters():
param.requires_grad = requires_grad
for param in self.slice3.parameters():
param.requires_grad = requires_grad
# pretrained
if pretrained:
self.init_weights(pretrained)
def forward(self, x):
"""
Forward function.
Args:
x (Tensor): Input tensor with shape (n, 3, h, w).
Returns:
Forward results in 3 levels.
x_level1 (Tensor): Forward results in level 1 (n, 64, h, w).
x_level2 (Tensor): Forward results in level 2 (n, 128, h/2, w/2).
x_level3 (Tensor): Forward results in level 3 (n, 256, h/4, w/4).
"""
x = self.img_normalize(x)
x_level1 = x = self.slice1(x)
x_level2 = x = self.slice2(x)
x_level3 = x = self.slice3(x)
return x_level1, x_level2, x_level3
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass # use default initialization
else:
raise TypeError('"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
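# --- Usage sketch (illustration only; not part of the original module) ---
# Assuming mmedit/mmcv and torchvision are installed, the extractor can be
# instantiated directly (this downloads the torchvision VGG19 weights).
# The input size is a hypothetical example; shapes follow the docstring above.
#
#   lte = LTE(requires_grad=False)
#   x = torch.rand(1, 3, 64, 64)
#   x_level1, x_level2, x_level3 = lte(x)
#   # expected shapes: (1, 64, 64, 64), (1, 128, 32, 32), (1, 256, 16, 16)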
|
import asyncio
import json
from galaxy.api.types import FriendInfo
from galaxy.api.errors import UnknownError
def test_get_friends_success(plugin, read, write):
request = {
"jsonrpc": "2.0",
"id": "3",
"method": "import_friends"
}
read.side_effect = [json.dumps(request).encode() + b"\n", b""]
plugin.get_friends.coro.return_value = [
FriendInfo("3", "Jan"),
FriendInfo("5", "Ola")
]
asyncio.run(plugin.run())
plugin.get_friends.assert_called_with()
response = json.loads(write.call_args[0][0])
assert response == {
"jsonrpc": "2.0",
"id": "3",
"result": {
"friend_info_list": [
{"user_id": "3", "user_name": "Jan"},
{"user_id": "5", "user_name": "Ola"}
]
}
}
def test_get_friends_failure(plugin, read, write):
request = {
"jsonrpc": "2.0",
"id": "3",
"method": "import_friends"
}
read.side_effect = [json.dumps(request).encode() + b"\n", b""]
plugin.get_friends.coro.side_effect = UnknownError()
asyncio.run(plugin.run())
plugin.get_friends.assert_called_with()
response = json.loads(write.call_args[0][0])
assert response == {
"jsonrpc": "2.0",
"id": "3",
"error": {
"code": 0,
"message": "Unknown error",
}
}
def test_add_friend(plugin, write):
friend = FriendInfo("7", "Kuba")
    async def coroutine():
        plugin.add_friend(friend)
    asyncio.run(coroutine())
response = json.loads(write.call_args[0][0])
assert response == {
"jsonrpc": "2.0",
"method": "friend_added",
"params": {
"friend_info": {"user_id": "7", "user_name": "Kuba"}
}
}
def test_remove_friend(plugin, write):
    async def coroutine():
        plugin.remove_friend("5")
    asyncio.run(coroutine())
response = json.loads(write.call_args[0][0])
assert response == {
"jsonrpc": "2.0",
"method": "friend_removed",
"params": {
"user_id": "5"
}
}
|
#todo: add list files option. list different file types
#list file extensions for specific type of files
#add exit as an option in more places
import shutil
import os
from pathlib import Path
#works on windows 10
version = "0.0.2"
options = ("move files", "file extensions", "clean desktop", "about", "exit")
folders = ("desktop", "documents", "downloads", "music", "pictures", "videos")
fileTypes = ("images", "videos", "audio", "documents")
#https://fileinfo.com/filetypes/common
extensions = {"images": (".jpg", ".gif", ".png", ".tiff", ".tif", ".yuv", ".thm", ".tga", ".pspimage", ".psd", ".dds", ".bmp"), "videos": (".3g2", ".3gp", ".asf", ".avi", ".flv", ".m4v", ".mov", ".mp4", ".mpg", ".rm", ".srt", ".swf", ".vob", ".wmv"), "audio": (".wav", ".mp3", ".mpa", ".wma", ".ogg", ".mid", ".m4a", ".m3u", ".iff", ".aif"), "documents": (".doc", ".docx", ".log", ".msg", ".otd", ".pages", ".rtf", ".tex", ".txt", ".pdf", ".wpd", ".wps", ".sxw", ".stw", ".csv", ".dat", ".pps", ".ppt", ".pptx", ".xlr", ".xls", ".xlsx", ".indd", ".pct", ".ai", ".eps", ".ps", ".svg") }
cleanDesktop = {"pictures":"images", "videos":"videos", "music":"audio", "documents":"documents"}
def getItem(items, message):
print(message)
tmp = input( str(items)[1:-1] + "\n" )
while(not tmp.lower().strip() in items):
print("not found")
print(message)
tmp = input( str(items)[1:-1] + "\n" )
return tmp.lower().strip()
def moveFiles(source, dest, fileType):
moved = 0
files = os.listdir(source)
for f in files:
if os.path.splitext(f)[1].lower() in extensions[fileType]:
shutil.move(source + f, dest)
print("\t" + os.path.splitext(f)[0] + os.path.splitext(f)[1])
moved += 1
return moved
option = getItem(options, "What would you like to do?")
while option != "exit":
if option == "move files":
sourceFolder = getItem(folders, "Which folder would you like to move from?")
destFolder = getItem(folders, "Which folder would you like to move to?")
source = str(Path.home()) + "/" + sourceFolder + "/"
dest = str(Path.home()) + "/" + destFolder + "/"
fileType = getItem(fileTypes, "Which type of file would you like to move?")
print("moving files...")
moved = moveFiles(source, dest, fileType)
print("moved " + str(moved) + " files")
elif option == "file extensions":
print(extensions)
elif option == "clean desktop":
source = str(Path.home()) + "/desktop/"
print("moving files...")
moved = 0
for key, value in cleanDesktop.items():
#keys are folders, values are fileTypes
dest = str(Path.home()) + "/" + key + "/"
fileType = value
moved += moveFiles(source, dest, fileType)
print("moved " + str(moved) + " files")
elif option == "about":
print("\tpymove version " + version)
print("\twindows only")
print("\tuse pymove to easily move different types of files between folders")
option = getItem(options, "What would you like to do?")
|
""" Connected Components """
import numpy as np
from scipy import ndimage
import cc3d
from ... import seg_utils
def connected_components(d, thresh=0, overlap_seg=None, dtype=np.uint32):
"""
Performs basic connected components on network
output given a threshold value. Returns the components
as a desired datatype (default: np.uint32)
"""
mask = d > thresh
if overlap_seg is None:
# C-order speeds up continuation extraction by a LOT
mask = np.ascontiguousarray(mask)
return cc3d.connected_components(mask, connectivity=6).astype(dtype)
else:
temp = np.zeros(d.shape, dtype=overlap_seg.dtype, order='C')
temp[mask] = overlap_seg[mask]
return cc3d.connected_components(temp, connectivity=6).astype(dtype)
def dilated_components(output, dil_param, cc_thresh):
"""
Performs a version of connected components with dilation
Expands the voxels over threshold by dil_param in 2D
Runs connected components on the dilated mask
Removes the voxels which weren't originally above threshold
"""
if dil_param == 0:
return connected_components(output, cc_thresh)
mask = output > cc_thresh
dil_mask = seg_utils.dilate_by_k(mask, dil_param)
ccs = connected_components(dil_mask, 0)
# Removing dilated voxels
ccs[mask == 0] = 0
return ccs
def dilate_mask_by_k(d, k):
""" Dilates a volume of data by k """
kernel = make_dilation_kernel(k)
return ndimage.binary_dilation(d, structure=kernel)
def make_dilation_kernel(k):
""" 2D Manhattan Distance Kernel """
kernel = ndimage.generate_binary_structure(2, 1)
return ndimage.iterate_structure(kernel, k)[np.newaxis, :, :]
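# --- Usage sketch (illustration only; not part of the original module) ---
# The relative `seg_utils` import means this file only runs inside its
# package, so the example is left as a comment. The toy volume and threshold
# are hypothetical.
#
#   d = np.zeros((4, 8, 8), dtype=np.float32)
#   d[1:3, 1:3, 1:3] = 1.0   # one blob above threshold
#   d[1:3, 5:7, 5:7] = 0.8   # a second, separate blob
#   labels = connected_components(d, thresh=0.5)
#   # np.unique(labels) -> array([0, 1, 2], dtype=uint32): background + 2 components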
|
# Copyright 2018 Harold Fellermann
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Collection of utilities"""
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass.
Code taken from six (https://pypi.python.org/pypi/six).
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class MetaClass(meta):
"""The dummy metaclass"""
def __new__(cls, name, _, doc):
return meta(name, bases, doc)
return type.__new__(MetaClass, 'temporary_class', (), {})
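# --- Usage sketch (illustration only; `ExampleMeta`, `ExampleBase` and
# `Example` are hypothetical names, not part of the original module) ---
if __name__ == "__main__":
    class ExampleMeta(type):
        pass

    class ExampleBase(object):
        pass

    class Example(with_metaclass(ExampleMeta, ExampleBase)):
        pass

    # The dummy metaclass replaced itself: Example is built by ExampleMeta and
    # inherits from ExampleBase, with no 'temporary_class' left in the MRO.
    assert type(Example) is ExampleMeta
    assert issubclass(Example, ExampleBase)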
|
"""Support for functionality to interact with Android TV / Fire TV devices."""
import functools
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP)
from homeassistant.const import (
ATTR_COMMAND, ATTR_ENTITY_ID, CONF_DEVICE_CLASS, CONF_HOST, CONF_NAME,
CONF_PORT, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING,
STATE_STANDBY)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
ANDROIDTV_DOMAIN = 'androidtv'
_LOGGER = logging.getLogger(__name__)
SUPPORT_ANDROIDTV = SUPPORT_PAUSE | SUPPORT_PLAY | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_STOP | SUPPORT_VOLUME_MUTE | \
SUPPORT_VOLUME_STEP
SUPPORT_FIRETV = SUPPORT_PAUSE | SUPPORT_PLAY | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_SELECT_SOURCE | SUPPORT_STOP
CONF_ADBKEY = 'adbkey'
CONF_ADB_SERVER_IP = 'adb_server_ip'
CONF_ADB_SERVER_PORT = 'adb_server_port'
CONF_APPS = 'apps'
CONF_GET_SOURCES = 'get_sources'
CONF_TURN_ON_COMMAND = 'turn_on_command'
CONF_TURN_OFF_COMMAND = 'turn_off_command'
DEFAULT_NAME = 'Android TV'
DEFAULT_PORT = 5555
DEFAULT_ADB_SERVER_PORT = 5037
DEFAULT_GET_SOURCES = True
DEFAULT_DEVICE_CLASS = 'auto'
DEVICE_ANDROIDTV = 'androidtv'
DEVICE_FIRETV = 'firetv'
DEVICE_CLASSES = [DEFAULT_DEVICE_CLASS, DEVICE_ANDROIDTV, DEVICE_FIRETV]
SERVICE_ADB_COMMAND = 'adb_command'
SERVICE_ADB_COMMAND_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_COMMAND): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS):
vol.In(DEVICE_CLASSES),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ADBKEY): cv.isfile,
vol.Optional(CONF_ADB_SERVER_IP): cv.string,
vol.Optional(CONF_ADB_SERVER_PORT, default=DEFAULT_ADB_SERVER_PORT):
cv.port,
vol.Optional(CONF_GET_SOURCES, default=DEFAULT_GET_SOURCES): cv.boolean,
vol.Optional(CONF_APPS, default=dict()):
vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_TURN_ON_COMMAND): cv.string,
vol.Optional(CONF_TURN_OFF_COMMAND): cv.string
})
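# Illustrative configuration.yaml entry accepted by the schema above (the
# host, name and adb_server_ip values are placeholders, not taken from the
# original source):
#
#   media_player:
#     - platform: androidtv
#       host: 192.168.0.10
#       name: "Living Room TV"
#       device_class: androidtv
#       adb_server_ip: 127.0.0.1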
# Translate from `AndroidTV` / `FireTV` reported state to HA state.
ANDROIDTV_STATES = {'off': STATE_OFF,
'idle': STATE_IDLE,
'standby': STATE_STANDBY,
'playing': STATE_PLAYING,
'paused': STATE_PAUSED}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Android TV / Fire TV platform."""
from androidtv import setup
hass.data.setdefault(ANDROIDTV_DOMAIN, {})
host = '{0}:{1}'.format(config[CONF_HOST], config[CONF_PORT])
if CONF_ADB_SERVER_IP not in config:
# Use "python-adb" (Python ADB implementation)
if CONF_ADBKEY in config:
aftv = setup(host, config[CONF_ADBKEY],
device_class=config[CONF_DEVICE_CLASS])
adb_log = " using adbkey='{0}'".format(config[CONF_ADBKEY])
else:
aftv = setup(host, device_class=config[CONF_DEVICE_CLASS])
adb_log = ""
else:
# Use "pure-python-adb" (communicate with ADB server)
aftv = setup(host, adb_server_ip=config[CONF_ADB_SERVER_IP],
adb_server_port=config[CONF_ADB_SERVER_PORT],
device_class=config[CONF_DEVICE_CLASS])
adb_log = " using ADB server at {0}:{1}".format(
config[CONF_ADB_SERVER_IP], config[CONF_ADB_SERVER_PORT])
if not aftv.available:
# Determine the name that will be used for the device in the log
if CONF_NAME in config:
device_name = config[CONF_NAME]
elif config[CONF_DEVICE_CLASS] == DEVICE_ANDROIDTV:
device_name = 'Android TV device'
elif config[CONF_DEVICE_CLASS] == DEVICE_FIRETV:
device_name = 'Fire TV device'
else:
device_name = 'Android TV / Fire TV device'
_LOGGER.warning("Could not connect to %s at %s%s",
device_name, host, adb_log)
raise PlatformNotReady
if host in hass.data[ANDROIDTV_DOMAIN]:
_LOGGER.warning("Platform already setup on %s, skipping", host)
else:
if aftv.DEVICE_CLASS == DEVICE_ANDROIDTV:
device = AndroidTVDevice(aftv, config[CONF_NAME],
config[CONF_APPS],
config.get(CONF_TURN_ON_COMMAND),
config.get(CONF_TURN_OFF_COMMAND))
device_name = config[CONF_NAME] if CONF_NAME in config \
else 'Android TV'
else:
device = FireTVDevice(aftv, config[CONF_NAME], config[CONF_APPS],
config[CONF_GET_SOURCES],
config.get(CONF_TURN_ON_COMMAND),
config.get(CONF_TURN_OFF_COMMAND))
device_name = config[CONF_NAME] if CONF_NAME in config \
else 'Fire TV'
add_entities([device])
_LOGGER.debug("Setup %s at %s%s", device_name, host, adb_log)
hass.data[ANDROIDTV_DOMAIN][host] = device
if hass.services.has_service(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND):
return
def service_adb_command(service):
"""Dispatch service calls to target entities."""
cmd = service.data.get(ATTR_COMMAND)
entity_id = service.data.get(ATTR_ENTITY_ID)
target_devices = [dev for dev in hass.data[ANDROIDTV_DOMAIN].values()
if dev.entity_id in entity_id]
for target_device in target_devices:
output = target_device.adb_command(cmd)
# log the output if there is any
if output and (not isinstance(output, str) or output.strip()):
_LOGGER.info("Output of command '%s' from '%s': %s",
cmd, target_device.entity_id, repr(output))
hass.services.register(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND,
service_adb_command,
schema=SERVICE_ADB_COMMAND_SCHEMA)
def adb_decorator(override_available=False):
"""Send an ADB command if the device is available and catch exceptions."""
def _adb_decorator(func):
"""Wait if previous ADB commands haven't finished."""
@functools.wraps(func)
def _adb_exception_catcher(self, *args, **kwargs):
# If the device is unavailable, don't do anything
if not self.available and not override_available:
return None
try:
return func(self, *args, **kwargs)
except self.exceptions as err:
_LOGGER.error(
"Failed to execute an ADB command. ADB connection re-"
"establishing attempt in the next update. Error: %s", err)
self._available = False # pylint: disable=protected-access
return None
return _adb_exception_catcher
return _adb_decorator
class ADBDevice(MediaPlayerDevice):
"""Representation of an Android TV or Fire TV device."""
def __init__(self, aftv, name, apps, turn_on_command,
turn_off_command):
"""Initialize the Android TV / Fire TV device."""
from androidtv.constants import APPS, KEYS
self.aftv = aftv
self._name = name
self._apps = APPS
self._apps.update(apps)
self._keys = KEYS
self.turn_on_command = turn_on_command
self.turn_off_command = turn_off_command
# ADB exceptions to catch
if not self.aftv.adb_server_ip:
# Using "python-adb" (Python ADB implementation)
from adb.adb_protocol import (InvalidChecksumError,
InvalidCommandError,
InvalidResponseError)
from adb.usb_exceptions import TcpTimeoutException
self.exceptions = (AttributeError, BrokenPipeError, TypeError,
ValueError, InvalidChecksumError,
InvalidCommandError, InvalidResponseError,
TcpTimeoutException)
else:
# Using "pure-python-adb" (communicate with ADB server)
self.exceptions = (ConnectionResetError, RuntimeError)
# Property attributes
self._available = self.aftv.available
self._current_app = None
self._state = None
@property
def app_id(self):
"""Return the current app."""
return self._current_app
@property
def app_name(self):
"""Return the friendly name of the current app."""
return self._apps.get(self._current_app, self._current_app)
@property
def available(self):
"""Return whether or not the ADB connection is valid."""
return self._available
@property
def name(self):
"""Return the device name."""
return self._name
@property
def should_poll(self):
"""Device should be polled."""
return True
@property
def state(self):
"""Return the state of the player."""
return self._state
@adb_decorator()
def media_play(self):
"""Send play command."""
self.aftv.media_play()
@adb_decorator()
def media_pause(self):
"""Send pause command."""
self.aftv.media_pause()
@adb_decorator()
def media_play_pause(self):
"""Send play/pause command."""
self.aftv.media_play_pause()
@adb_decorator()
def turn_on(self):
"""Turn on the device."""
if self.turn_on_command:
self.aftv.adb_shell(self.turn_on_command)
else:
self.aftv.turn_on()
@adb_decorator()
def turn_off(self):
"""Turn off the device."""
if self.turn_off_command:
self.aftv.adb_shell(self.turn_off_command)
else:
self.aftv.turn_off()
@adb_decorator()
def media_previous_track(self):
"""Send previous track command (results in rewind)."""
self.aftv.media_previous_track()
@adb_decorator()
def media_next_track(self):
"""Send next track command (results in fast-forward)."""
self.aftv.media_next_track()
@adb_decorator()
def adb_command(self, cmd):
"""Send an ADB command to an Android TV / Fire TV device."""
key = self._keys.get(cmd)
if key:
return self.aftv.adb_shell('input keyevent {}'.format(key))
if cmd == 'GET_PROPERTIES':
return self.aftv.get_properties_dict()
return self.aftv.adb_shell(cmd)
class AndroidTVDevice(ADBDevice):
"""Representation of an Android TV device."""
def __init__(self, aftv, name, apps, turn_on_command,
turn_off_command):
"""Initialize the Android TV device."""
super().__init__(aftv, name, apps, turn_on_command,
turn_off_command)
self._device = None
self._device_properties = self.aftv.device_properties
self._is_volume_muted = None
self._unique_id = self._device_properties.get('serialno')
self._volume_level = None
@adb_decorator(override_available=True)
def update(self):
"""Update the device state and, if necessary, re-connect."""
# Check if device is disconnected.
if not self._available:
# Try to connect
self._available = self.aftv.connect(always_log_errors=False)
# To be safe, wait until the next update to run ADB commands.
return
# If the ADB connection is not intact, don't update.
if not self._available:
return
# Get the updated state and attributes.
state, self._current_app, self._device, self._is_volume_muted, \
self._volume_level = self.aftv.update()
self._state = ANDROIDTV_STATES[state]
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._is_volume_muted
@property
def source(self):
"""Return the current playback device."""
return self._device
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ANDROIDTV
@property
def unique_id(self):
"""Return the device unique id."""
return self._unique_id
@property
def volume_level(self):
"""Return the volume level."""
return self._volume_level
@adb_decorator()
def media_stop(self):
"""Send stop command."""
self.aftv.media_stop()
@adb_decorator()
def mute_volume(self, mute):
"""Mute the volume."""
self.aftv.mute_volume()
@adb_decorator()
def volume_down(self):
"""Send volume down command."""
self._volume_level = self.aftv.volume_down(self._volume_level)
@adb_decorator()
def volume_up(self):
"""Send volume up command."""
self._volume_level = self.aftv.volume_up(self._volume_level)
class FireTVDevice(ADBDevice):
"""Representation of a Fire TV device."""
def __init__(self, aftv, name, apps, get_sources,
turn_on_command, turn_off_command):
"""Initialize the Fire TV device."""
super().__init__(aftv, name, apps, turn_on_command,
turn_off_command)
self._get_sources = get_sources
self._running_apps = None
@adb_decorator(override_available=True)
def update(self):
"""Update the device state and, if necessary, re-connect."""
# Check if device is disconnected.
if not self._available:
# Try to connect
self._available = self.aftv.connect(always_log_errors=False)
# To be safe, wait until the next update to run ADB commands.
return
# If the ADB connection is not intact, don't update.
if not self._available:
return
# Get the `state`, `current_app`, and `running_apps`.
state, self._current_app, self._running_apps = \
self.aftv.update(self._get_sources)
self._state = ANDROIDTV_STATES[state]
@property
def source(self):
"""Return the current app."""
return self._current_app
@property
def source_list(self):
"""Return a list of running apps."""
return self._running_apps
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_FIRETV
@adb_decorator()
def media_stop(self):
"""Send stop (back) command."""
self.aftv.back()
@adb_decorator()
def select_source(self, source):
"""Select input source.
If the source starts with a '!', then it will close the app instead of
opening it.
"""
if isinstance(source, str):
if not source.startswith('!'):
self.aftv.launch_app(source)
else:
self.aftv.stop_app(source[1:].lstrip())
|
from copy import copy
import asyncio
import aiohttp
import importlib
import logging
import inspect
import types
from .communicaton import JsonRpcRequest, SyncJsonRpcRequest
from .threading import ThreadedWorkerPool
from .auth import DummyAuthBackend
from .protocol import (
encode_notification,
JsonRpcMsgTyp,
encode_result,
encode_error,
decode_msg,
)
from .exceptions import (
RpcGenericServerDefinedError,
RpcInvalidRequestError,
RpcMethodNotFoundError,
RpcInvalidParamsError,
RpcInternalError,
RpcError,
)
class JsonRpcMethod:
CREDENTIAL_KEYS = ['request', 'worker_pool']
def __init__(self, method):
self.method = method
# method introspection
try:
self.argspec = inspect.getfullargspec(method)
self.introspected = True
except TypeError: # unsupported callable
self.argspec = inspect.getfullargspec(lambda request: None)
self.introspected = False
self.defaults = copy(self.argspec.defaults)
self.args = [i for i in self.argspec.args
if i not in self.CREDENTIAL_KEYS + ['self']]
# required args
self.required_args = copy(self.args)
if self.defaults:
self.required_args = [
i for i in self.args[:-len(self.defaults or ())]
if i not in self.CREDENTIAL_KEYS
]
# optional args
self.optional_args = [
i for i in (self.args[len(self.args or []) -
len(self.defaults or ()):])
if i not in self.CREDENTIAL_KEYS + ['self']
]
# gen repr string
args = []
for i, v in enumerate(self.args[::-1]):
if self.defaults and not i >= len(self.defaults):
args.append('{}={}'.format(v, repr(self.defaults[i])))
else:
args.append(v)
args = [
*[i for i in self.CREDENTIAL_KEYS if i in self.argspec.args],
*args[::-1],
]
self._repr_str = 'JsonRpcMethod({}({}))'.format(
self.method.__name__,
', '.join(args),
)
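    # Illustrative introspection result (hypothetical handler, not from the
    # original source): for
    #
    #     async def ping(request, name, count=1): ...
    #
    # the constructor above yields args == ['name', 'count'],
    # required_args == ['name'], optional_args == ['count'] and
    # repr(...) == 'JsonRpcMethod(ping(request, name, count=1))'.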
def __repr__(self):
return self._repr_str
async def __call__(self, http_request, rpc, msg):
params = msg.data['params']
method_params = dict()
# convert args
if params is None:
params = {}
if type(params) not in (dict, list):
params = [params]
if type(params) == list:
params = {self.args[i]: v for i, v in enumerate(params)
if i < len(self.args)}
# required args
for i in self.required_args:
if i not in params:
                raise RpcInvalidParamsError(message='too few arguments')
method_params[i] = params[i]
# optional args
for i, v in enumerate(self.optional_args):
method_params[v] = params.get(v, self.defaults[i])
# validators
if hasattr(self.method, 'validators'):
for arg_name, validator_list in self.method.validators.items():
if not isinstance(validator_list, (list, tuple)):
validator_list = [validator_list]
for validator in validator_list:
if isinstance(validator, type):
if not isinstance(method_params[arg_name], validator):
raise RpcInvalidParamsError(message="'{}' has to be '{}'".format(arg_name, validator.__name__)) # NOQA
elif isinstance(validator, types.FunctionType):
if not validator(method_params[arg_name]):
raise RpcInvalidParamsError(message="'{}': validation error".format(arg_name)) # NOQA
# credentials
if 'request' in self.argspec.args:
if asyncio.iscoroutinefunction(self.method):
method_params['request'] = JsonRpcRequest(
rpc=rpc, http_request=http_request, msg=msg)
else:
method_params['request'] = SyncJsonRpcRequest(
rpc=rpc, http_request=http_request, msg=msg)
if 'worker_pool' in self.argspec.args:
method_params['worker_pool'] = rpc.worker_pool
# run method
if asyncio.iscoroutinefunction(self.method):
return await self.method(**method_params)
else:
return await rpc.worker_pool.run(self.method, **method_params)
class JsonRpc(object):
def __init__(self, loop=None, max_workers=0, auth_backend=None,
logger=None):
self.clients = []
self.methods = {}
self.topics = {}
self.state = {}
self.logger = logger or logging.getLogger('aiohttp-json-rpc.server')
self.auth_backend = auth_backend or DummyAuthBackend()
self.loop = loop or asyncio.get_event_loop()
self.worker_pool = ThreadedWorkerPool(max_workers=max_workers)
self.add_methods(
('', self.get_methods),
('', self.get_topics),
('', self.get_subscriptions),
('', self.subscribe),
('', self.unsubscribe),
)
def _add_method(self, method, name='', prefix=''):
if not callable(method):
return
name = name or method.__name__
if prefix:
name = '{}__{}'.format(prefix, name)
self.methods[name] = JsonRpcMethod(method)
def _add_methods_from_object(self, obj, prefix='', ignore=[]):
for attr_name in dir(obj):
if attr_name.startswith('_') or attr_name in ignore:
continue
self._add_method(getattr(obj, attr_name), prefix=prefix)
def _add_methods_by_name(self, name, prefix=''):
try:
module = importlib.import_module(name)
self._add_methods_from_object(module, prefix=prefix)
except ImportError:
name = name.split('.')
module = importlib.import_module('.'.join(name[:-1]))
self._add_method(getattr(module, name[-1]), prefix=prefix)
def add_methods(self, *args, prefix=''):
for arg in args:
if not (type(arg) == tuple and len(arg) >= 2):
raise ValueError('invalid format')
if not type(arg[0]) == str:
raise ValueError('prefix has to be str')
prefix_ = prefix or arg[0]
method = arg[1]
if callable(method):
name = arg[2] if len(arg) >= 3 else ''
self._add_method(method, name=name, prefix=prefix_)
elif type(method) == str:
self._add_methods_by_name(method, prefix=prefix_)
else:
self._add_methods_from_object(method, prefix=prefix_)
def add_topics(self, *topics):
for topic in topics:
if type(topic) not in (str, tuple):
raise ValueError('Topic has to be string or tuple')
# find name
if type(topic) == str:
name = topic
else:
name = topic[0]
# find and apply decorators
def func(request):
return True
if type(topic) == tuple and len(topic) > 1:
decorators = topic[1]
if not type(decorators) == tuple:
decorators = (decorators, )
for decorator in decorators:
func = decorator(func)
self.topics[name] = func
def __call__(self, request):
return self.handle_request(request)
async def handle_request(self, request):
# prepare request
request.rpc = self
self.auth_backend.prepare_request(request)
# handle request
if request.method == 'GET':
# handle Websocket
if request.headers.get('upgrade', '').lower() == 'websocket':
return (await self.handle_websocket_request(request))
# handle GET
else:
return aiohttp.web.Response(status=405)
# handle POST
elif request.method == 'POST':
return aiohttp.web.Response(status=405)
async def _ws_send_str(self, client, string):
if client.ws._writer.transport.is_closing():
self.clients.remove(client)
await client.ws.close()
await client.ws.send_str(string)
async def _handle_rpc_msg(self, http_request, raw_msg):
try:
msg = decode_msg(raw_msg.data)
self.logger.debug('message decoded: %s', msg)
except RpcError as error:
await self._ws_send_str(http_request, encode_error(error))
return
# handle requests
if msg.type == JsonRpcMsgTyp.REQUEST:
self.logger.debug('msg gets handled as request')
# check if method is available
if msg.data['method'] not in http_request.methods:
self.logger.debug('method %s is unknown or restricted',
msg.data['method'])
await self._ws_send_str(http_request, encode_error(
RpcMethodNotFoundError(msg_id=msg.data.get('id', None))
))
return
# call method
raw_response = getattr(
http_request.methods[msg.data['method']].method,
'raw_response',
False,
)
try:
result = await http_request.methods[msg.data['method']](
http_request=http_request,
rpc=self,
msg=msg,
)
if not raw_response:
result = encode_result(msg.data['id'], result)
await self._ws_send_str(http_request, result)
except (RpcGenericServerDefinedError,
RpcInvalidRequestError,
RpcInvalidParamsError) as error:
await self._ws_send_str(
http_request,
encode_error(error, id=msg.data.get('id', None))
)
except Exception as error:
logging.error(error, exc_info=True)
await self._ws_send_str(http_request, encode_error(
RpcInternalError(msg_id=msg.data.get('id', None))
))
# handle result
elif msg.type == JsonRpcMsgTyp.RESULT:
self.logger.debug('msg gets handled as result')
http_request.pending[msg.data['id']].set_result(
msg.data['result'])
else:
self.logger.debug('unsupported msg type (%s)', msg.type)
await self._ws_send_str(http_request, encode_error(
RpcInvalidRequestError(msg_id=msg.data.get('id', None))
))
async def handle_websocket_request(self, http_request):
http_request.msg_id = 0
http_request.pending = {}
# prepare and register websocket
ws = aiohttp.web_ws.WebSocketResponse()
await ws.prepare(http_request)
http_request.ws = ws
self.clients.append(http_request)
while not ws.closed:
self.logger.debug('waiting for messages')
raw_msg = await ws.receive()
if not raw_msg.type == aiohttp.WSMsgType.TEXT:
continue
self.logger.debug('raw msg received: %s', raw_msg.data)
self.loop.create_task(self._handle_rpc_msg(http_request, raw_msg))
self.clients.remove(http_request)
return ws
async def get_methods(self, request):
return list(request.methods.keys())
async def get_topics(self, request):
return list(request.topics)
async def get_subscriptions(self, request):
return list(request.subscriptions)
async def subscribe(self, request):
if type(request.params) is not list:
request.params = [request.params]
for topic in request.params:
if topic and topic in request.topics:
request.subscriptions.add(topic)
if topic in self.state:
await request.send_notification(topic, self.state[topic])
return list(request.subscriptions)
async def unsubscribe(self, request):
if type(request.params) is not list:
request.params = [request.params]
for topic in request.params:
if topic and topic in request.subscriptions:
request.subscriptions.remove(topic)
return list(request.subscriptions)
def filter(self, topics):
if type(topics) is not list:
topics = [topics]
topics = set(topics)
for client in self.clients:
if client.ws.closed:
continue
if len(topics & client.subscriptions) > 0:
yield client
async def notify(self, topic, data=None, state=False):
if type(topic) is not str:
raise ValueError
if state:
self.state[topic] = data
notification = None
for client in self.filter(topic):
try:
if notification is None:
notification = encode_notification(topic, data)
await self._ws_send_str(client, notification)
except Exception as e:
self.logger.exception(e)
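# Hedged usage sketch (added, not part of the original module): minimal wiring
# of JsonRpc into an aiohttp application. The instance itself is registered as
# the route handler because JsonRpc.__call__ delegates to handle_request();
# the "ping" coroutine and the '/rpc' path are illustrative.
if __name__ == '__main__':
    from aiohttp import web

    async def ping(request):
        # "request" is the JsonRpcRequest injected by JsonRpcMethod
        return 'pong'

    rpc = JsonRpc()
    rpc.add_methods(('', ping))      # exposed to clients as method "ping"
    rpc.add_topics('chat')           # clients may subscribe to topic "chat"

    app = web.Application()
    app.router.add_route('*', '/rpc', rpc)
    web.run_app(app)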
|
"""
This file offers the methods to automatically retrieve the graph Candidatus Uhrbacteria bacterium RIFOXYB2_FULL_45_11.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusUhrbacteriaBacteriumRifoxyb2Full4511(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Uhrbacteria bacterium RIFOXYB2_FULL_45_11 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Candidatus Uhrbacteria bacterium RIFOXYB2_FULL_45_11 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusUhrbacteriaBacteriumRifoxyb2Full4511",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
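# Hedged usage sketch (added): retrieving the graph with the default options;
# this assumes network access to the STRING repository or a previously
# populated cache directory.
if __name__ == "__main__":
    graph = CandidatusUhrbacteriaBacteriumRifoxyb2Full4511(
        directed=False,
        version="links.v11.5",
    )
    print(graph)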
|
"""
Created on May 31, 2016
@author: Jafar Taghiyar (jtaghiyar@bccrc.ca)
Updated Nov 21, 2017 by Spencer Vatrt-Watts (github.com/Spenca)
"""
from collections import OrderedDict
#============================
# Django imports
#----------------------------
from django.db import models
from django.shortcuts import render
from django.template.defaulttags import register
class Render(object):
"""
Render the request with the given template.
"""
def __init__(self, template):
self.template = template
def __call__(self, func):
# @functools.wraps(func)
def wrapper(request, *args, **kwargs):
res = func(request, *args, **kwargs)
# render it only if the response is a context dictionary
if isinstance(res, dict):
return render(request, self.template, res)
else:
return res
return wrapper
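# Hedged usage sketch (added): a view decorated with Render returns a context
# dict and is rendered with the given template; the template path and view
# below are hypothetical.
@Render("core/example.html")
def example_view(request):
    # Returning a dict triggers rendering with the template above; returning
    # an HttpResponse-like object is passed through unchanged.
    return {"title": "Example"}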
def create_chrfield(name, max_length=50, blank=True, null=True, **kwargs):
"""wrap models.CharField for ease of use."""
return models.CharField(
name,
max_length=max_length,
blank=blank,
null=null,
**kwargs
)
def create_textfield(name, max_length=5000, blank=True, null=True, **kwargs):
"""wrap models.TextField for ease of use."""
return models.TextField(
name,
max_length=max_length,
blank=blank,
null=null,
**kwargs
)
def create_intfield(name, blank=True, null=True, **kwargs):
"""wrap models.IntegerField for ease of use."""
return models.IntegerField(
name,
blank=blank,
null=null,
**kwargs
)
def create_pathfield(name, max_length=250, blank=True, null=True, **kwargs):
"""wrap models.CharField for ease of use."""
return models.CharField(
name,
max_length=max_length,
blank=blank,
null=null,
**kwargs
)
def upload_dlp_library_path(instance, filename):
"""make a proper /path/to/filename for uploaded files."""
return "{0}/{1}/{2}".format(
'library', # leave this as 'library' for backwards compatibility
instance.library.id,
filename
)
# Maintain migrations backwards compatibility
upload_path = upload_dlp_library_path
def upload_tenx_library_path(instance, filename):
"""Make a proper file path for uploading 10x library files."""
return "{0}/{1}/{2}".format(
'tenxlibrary',
instance.library.id,
filename
)
class FieldValue(object):
fields_to_exclude = ['ID']
values_to_exclude = ['id']
def get_fields(self):
"""get verbose names of all the fields."""
field_names = [f.verbose_name for f in self._meta.fields
if f.verbose_name not in self.fields_to_exclude]
return field_names
def get_values(self):
"""get values of all the fields."""
fields = [field.name for field in self._meta.fields]
values = []
for f in fields:
if f not in self.values_to_exclude:
a = "get_%s_display" % (f)
if hasattr(self, a):
values.append(getattr(self, a)())
else:
values.append(getattr(self, f))
return values
def get_field_values(self):
"""return a dict of key:values."""
res = OrderedDict()
for field in self._meta.fields:
field_verbose_name = field.verbose_name
field_name = field.name
if field_verbose_name not in self.fields_to_exclude:
a = "get_%s_display" % (field_name)
if hasattr(self, a):
value = getattr(self, a)()
else:
value = getattr(self, field_name)
res[field_verbose_name] = value
return res
class LibraryAssistant(object):
gsc_required_fields = [
(
"sample",
"Sample",
"taxonomy_id",
"Taxonomy ID",
),
(
"libraryconstructioninformation",
"Library Construction Information",
"library_type",
"Library type",
),
(
"libraryconstructioninformation",
"Library Construction Information",
"library_construction_method",
"Library construction method",
),
(
"libraryquantificationandstorage",
"Library Quantification and Storage",
"quantification_method",
"Quantification method",
),
(
"libraryquantificationandstorage",
"Library Quantification and Storage",
"dna_concentration_nm",
"DNA concentration (nM)",
),
(
"libraryquantificationandstorage",
"Library Quantification and Storage",
"storage_medium",
"Storage medium",
),
(
"libraryquantificationandstorage",
"Library Quantification and Storage",
"size_range",
"Size range",
),
(
"libraryquantificationandstorage",
"Library Quantification and Storage",
"average_size",
"Average size",
),
]
def has_library_sample_detail(self):
if self.__class__.__name__ == 'DlpLibrary':
return hasattr(self, 'dlplibrarysampledetail')
elif self.__class__.__name__ == 'PbalLibrary':
return hasattr(self, 'pballibrarysampledetail')
elif self.__class__.__name__ == 'TenxLibrary':
return hasattr(self, 'tenxlibrarysampledetail')
def has_library_construction_information(self):
if self.__class__.__name__ == 'DlpLibrary':
return hasattr(self, 'dlplibraryconstructioninformation')
elif self.__class__.__name__ == 'PbalLibrary':
return hasattr(self, 'pballibraryconstructioninformation')
elif self.__class__.__name__ == 'TenxLibrary':
return hasattr(self, 'tenxlibraryconstructioninformation')
def has_library_quantification_and_storage(self):
if self.__class__.__name__ == 'DlpLibrary':
return hasattr(self, 'dlplibraryquantificationandstorage')
elif self.__class__.__name__ == 'PbalLibrary':
return hasattr(self, 'pballibraryquantificationandstorage')
elif self.__class__.__name__ == 'TenxLibrary':
return hasattr(self, 'tenxlibraryquantificationandstorage')
def get_missing_gsc_required_fields(self):
missing_required_fields = []
get_value = lambda related_obj, field: getattr(
getattr(self, related_obj), field
)
for i in self.gsc_required_fields:
related_obj = i[0]
obj_verbose_name = i[1]
field = i[2]
field_verbose_name = i[3]
            try:
                value = get_value(related_obj, field)
            # self might not have the related_obj yet.
            except Exception:
                missing_required_fields.append(
                    (obj_verbose_name, field_verbose_name)
                )
                continue
            if not value:
                missing_required_fields.append(
                    (obj_verbose_name, field_verbose_name)
                )
return missing_required_fields
def is_sequenced(self):
if self.__class__.__name__ == 'DlpLibrary':
return any([s.dlplane_set.filter(path_to_archive__isnull=False) for s in self.dlpsequencing_set.all()])
elif self.__class__.__name__ == 'PbalLibrary':
return any([s.pballane_set.filter(path_to_archive__isnull=False) for s in self.pbalsequencing_set.all()])
elif self.__class__.__name__ == 'TenxLibrary':
return any([s.tenxlane_set.filter(path_to_archive__isnull=False) for s in self.tenxsequencing_set.all()])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
"""Common paths used in tests"""
import os
SHARED_MOCK_DATA_DIR = os.path.dirname(os.path.realpath(__file__))
MOCK_APP_CONFIG = os.path.join(SHARED_MOCK_DATA_DIR, "mock_app_config")
MOCK_COMMENTED_APP_CONFIG = os.path.join(SHARED_MOCK_DATA_DIR, "mock_commented_app_config")
MOCK_COMPONENT = os.path.join(SHARED_MOCK_DATA_DIR, "mock_component.py")
|
import _plotly_utils.basevalidators
class OperationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="operation",
parent_name="histogram2dcontour.contours",
**kwargs
):
super(OperationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values",
[
"=",
"<",
">=",
">",
"<=",
"[]",
"()",
"[)",
"(]",
"][",
")(",
"](",
")[",
],
),
**kwargs
)
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.nn import conv_filter_ad
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
import os
from akg.utils.kernel_exec import gen_kernel_name
from tests.common.base import get_rtol_atol
def conv_backprop_filter_naive(x, w, y, pad_, stride_):
N, C, H, W = x.shape
_, _, OH, OW = y.shape
CO, CI, KH, KW = w.shape
pad_top, pad_bottom, pad_left, pad_right = pad_
stride_h, stride_w = stride_
x_pad = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant', constant_values=0)
x_img2col = np.full((N, C, KH, KW, OH, OW), 0, np.float32)
for nn in range(N):
for nc in range(C):
for nh in range(KH):
for nw in range(KW):
for nho in range(OH):
for nwo in range(OW):
x_img2col[nn, nc, nh, nw, nho, nwo] = x_pad[nn, nc, nho * stride_h + nh, nwo * stride_w + nw]
dw = np.zeros_like(w)
for nn in range(CO):
for nc in range(CI):
for nh in range(KH):
for nw in range(KW):
dw[nn, nc, nh, nw] += np.sum(y[:, nn, :, :] * x_img2col[:, nc, nh, nw, :, :], axis=(0, 1, 2))
N, C, H, W = dw.shape
dw = dw.reshape(N, C // 16, 16, H, W).transpose(1, 3, 4, 0, 2).copy()
dw = dw.reshape(C // 16 * H * W, N // 16, 16, 16)
return dw
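# Hedged shape sketch (added): a tiny invocation of the naive reference with
# channel counts that are multiples of the 16-element block used in the
# fractal reshape at the end of the function. The shapes are illustrative.
if __name__ == "__main__":
    _x = np.ones((1, 16, 4, 4), np.float32)    # N, C, H, W
    _w = np.ones((16, 16, 3, 3), np.float32)   # CO, CI, KH, KW
    _y = np.ones((1, 16, 2, 2), np.float32)    # N, CO, OH, OW
    _dw = conv_backprop_filter_naive(_x, _w, _y, (0, 0, 0, 0), (1, 1))
    print(_dw.shape)   # (CI//16 * KH * KW, CO//16, 16, 16) -> (9, 1, 16, 16)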
def gen_data_dw(x_shape, w_shape, pad_, stride_, dilation_, expect_file, attrs=None):
block_size = 16
print("Data gen ...")
fp32_mad = False
if (fp32_mad):
mmad_dtype = np.float32
else:
mmad_dtype = np.float16
x = random_gaussian(x_shape, miu=0.5, sigma=0.01).astype(mmad_dtype)
w = random_gaussian(w_shape, miu=1, sigma=0.1).astype(mmad_dtype)
pad_top, pad_bottom, pad_left, pad_right = pad_
stride_h, stride_w = stride_
dilation_h, dilation_w = dilation_
Ho = (x_shape[2] + pad_top + pad_bottom - ((w_shape[2] - 1) * dilation_h + 1)) // stride_h + 1
Wo = (x_shape[3] + pad_left + pad_right - ((w_shape[3] - 1) * dilation_w + 1)) // stride_w + 1
out_shape = (x_shape[0], w_shape[0], Ho, Wo)
y = random_gaussian(out_shape, miu=1, sigma=0.1).astype(mmad_dtype)
N, C, H, W = w_shape
dw_shape = (C // block_size * H * W, N // block_size, block_size, block_size)
flag_w = os.environ.get("WRITE_TO_DISK", "No")
if (flag_w == "No") and (os.path.exists(expect_file) == True):
# read expect from file
dw = np.fromfile(expect_file, np.float32).reshape(dw_shape)
else:
# compute expect data:
dw = conv_backprop_filter_naive(x.astype(np.float32), w.astype(np.float32), y.astype(np.float32), pad_, stride_)
if flag_w == "Yes":
# write expect to file
with open(expect_file, "w+") as file:
dw.tofile(file)
file.close()
# reshape
C0 = block_size
ON, OC, OH, OW = out_shape
WN, WC, WH, WW = w_shape
FN, FC, FH, FW = x_shape
x = x.reshape(FN, FC // C0, C0, FH, FW).transpose(0, 1, 3, 4, 2).copy()
y = y.reshape(ON, OC // C0, C0, OH, OW).transpose(0, 1, 3, 4, 2).copy()
return y, x, dw
def compare_4D(out_data, expect):
data_len = expect.size
actual = out_data
N, C1, H, W = out_data.shape
error = 0
count = 0
lastErr = -2
continueErr = 0
maxContinue = -1
maxEnd = 0
partial_debug = 0
for n in range(N):
for c1 in range(C1):
for h in range(H):
for w in range(W):
a = actual[n, c1, h, w]
b = expect[n, c1, h, w]
if (abs(a - b) > abs(b) * 5e-03):
if (partial_debug and (a == 0.0)):
continue
error += 1
if lastErr + 1 == count:
continueErr += 1
else:
if continueErr > maxContinue:
maxContinue = continueErr
maxEnd = lastErr
continueErr = 1
lastErr = count
count += 1
if continueErr > maxContinue:
maxContinue = continueErr
maxEnd = lastErr
print("error num: %d/%d (%.2f%%)" % (error, count, 100.0 * error / count))
print("longest error range: [%d, %d]" % (maxEnd - maxContinue + 1, maxEnd))
if maxContinue >= 16:
assert_res = False
else:
assert_res = True
return assert_res
def conv_filter_ad_run(fmap_shape, filter_shape, pad_, stride_, dilation_, attrs=None):
block_size = 16
conv_dtype = 'float16'
in_n, in_c, in_h, in_w = fmap_shape
cout, cin, w_h, w_w = filter_shape
assert(in_c == cin)
in_c = (in_c + block_size - 1) // block_size * block_size
cout = (cout + block_size - 1) // block_size * block_size
pad_top, pad_bottom, pad_left, pad_right = pad_
stride_h, stride_w = stride_
out_n = in_n
out_c = cout
out_h = (in_h + pad_top + pad_bottom - w_h) // stride_h + 1
out_w = (in_w + pad_left + pad_right - w_w) // stride_w + 1
x_shape = (in_n, in_c, in_h, in_w)
w_shape = (cout, in_c, w_h, w_w)
x_5D_shape = (in_n, in_c // block_size, in_h, in_w, block_size)
y_5D_shape = (out_n, out_c // block_size, out_h, out_w, block_size)
forward_input_output_shapes = [y_5D_shape, x_5D_shape]
dw_input_shapes = [y_5D_shape, x_5D_shape]
input_file = os.environ.get("RANDOM_DATA_DISK_PATH", "")
expect_file = input_file + "/" + gen_kernel_name([dw_input_shapes], [conv_dtype],
op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_],
kernel_name='conv_filter_ad', attrs=attrs) + ".bin"
print("gen_data begin.")
dy_data, dx_data, expect = gen_data_dw(x_shape, w_shape, pad_, stride_, dilation_, expect_file, attrs=attrs)
print("gen_data finished.")
out_data = np.full(expect.shape, 0, 'float32')
np_input = (dy_data, dx_data)
flag_w = os.environ.get("WRITE_TO_DISK", "No")
if flag_w == "Yes":
return np_input, out_data, expect, True
mod = utils.op_build_test(conv_filter_ad.conv_filter_ad, [dw_input_shapes], [conv_dtype],
op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_], kernel_name='conv_filter_ad', attrs=attrs, dump_code = True)
args = (dy_data, dx_data, out_data)
out_data = utils.mod_launch(mod, args, expect=expect)
rtol, atol = get_rtol_atol("conv_filter_ad", conv_dtype)
assert_res = compare_tensor(out_data, expect, rtol=rtol, atol=atol, equal_nan=True)
return np_input, out_data, expect, assert_res
|
from django.db import models
# class Item(models.Model):
# item = models.CharField(max_length=120)
#
# def __str__(self):
# return self.item
class Record(models.Model):
charity = models.CharField('Charity', max_length=120)
# item = models.CharField(max_length=120)
# time = models.CharField(max_length=120)
time = models.DateTimeField(auto_now_add=True)
quantity = models.PositiveIntegerField(default=1)
# item = models.ForeignKey(Item, on_delete=models.CASCADE)
# time = models.DateTimeField(input_formats=["%d %b %Y %H:%M:%S %Z"])
location = models.CharField(max_length=60)
|
from rest_framework import serializers
from customer.models import Books
class BooksSerializer(serializers.ModelSerializer):
class Meta:
model = Books
fields = "__all__"
class BooksSerializerGroup(serializers.ModelSerializer):
count = serializers.IntegerField()
class Meta:
model = Books
fields = ("title", "author", "count")
|
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework import permissions
from quickstart.serializers import UserSerializer, GroupSerializer
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
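# Hedged usage sketch (added): these viewsets are normally registered with a
# DRF router in the project's urls.py; the URL prefixes below are illustrative.
if __name__ == "__main__":
    from rest_framework import routers

    router = routers.DefaultRouter()
    router.register(r'users', UserViewSet)
    router.register(r'groups', GroupViewSet)
    print(router.urls)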
|
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
def _is_using_buggy_driver():
import bgl
    # We need to be conservative here because in multi-GPU systems the display
    # card might be quite old, but the other ones might be perfectly fine.
    #
    # So we shouldn't disable possibly good dedicated cards just because the
    # display card seems weak. Instead we only blacklist configurations which
    # are proven to cause problems.
if bgl.glGetString(bgl.GL_VENDOR) == "ATI Technologies Inc.":
import re
version = bgl.glGetString(bgl.GL_VERSION)
if version.endswith("Compatibility Profile Context"):
# Old HD 4xxx and 5xxx series drivers did not have driver version
# in the version string, but those cards do not quite work and
            # cause crashes.
return True
        regex = re.compile(r".*Compatibility Profile Context ([0-9]+(\.[0-9]+)+)$")
if not regex.match(version):
# Skip cards like FireGL
return False
version = regex.sub("\\1", version).split('.')
return int(version[0]) == 8
return False
def _workaround_buggy_drivers():
if _is_using_buggy_driver():
import _cycles
if hasattr(_cycles, "opencl_disable"):
print("Cycles: OpenGL driver known to be buggy, disabling OpenCL platform.")
_cycles.opencl_disable()
def init():
import bpy
import _cycles
import os.path
# Workaround possibly buggy legacy drivers which crashes on the OpenCL
# device enumeration.
#
    # These checks are not really correct because they might still fail
# in the case of multiple GPUs. However, currently buggy drivers
# are really old and likely to be used in single GPU systems only
# anyway.
#
    # Can't do it in the background mode, so we hope OpenCL is not enabled
# in the user preferences.
if not bpy.app.background:
_workaround_buggy_drivers()
path = os.path.dirname(__file__)
user_path = os.path.dirname(os.path.abspath(bpy.utils.user_resource('CONFIG', '')))
_cycles.init(path, user_path, bpy.app.background)
def create(engine, data, scene, region=None, v3d=None, rv3d=None, preview_osl=False):
import bpy
import _cycles
data = data.as_pointer()
userpref = bpy.context.user_preferences.as_pointer()
scene = scene.as_pointer()
if region:
region = region.as_pointer()
if v3d:
v3d = v3d.as_pointer()
if rv3d:
rv3d = rv3d.as_pointer()
engine.session = _cycles.create(engine.as_pointer(), userpref, data, scene, region, v3d, rv3d, preview_osl)
def free(engine):
if hasattr(engine, "session"):
if engine.session:
import _cycles
_cycles.free(engine.session)
del engine.session
def render(engine):
import _cycles
if hasattr(engine, "session"):
_cycles.render(engine.session)
def bake(engine, obj, pass_type, object_id, pixel_array, num_pixels, depth, result):
import _cycles
session = getattr(engine, "session", None)
if session is not None:
_cycles.bake(engine.session, obj.as_pointer(), pass_type, object_id, pixel_array.as_pointer(), num_pixels, depth, result.as_pointer())
def reset(engine, data, scene):
import _cycles
data = data.as_pointer()
scene = scene.as_pointer()
_cycles.reset(engine.session, data, scene)
def update(engine, data, scene):
import _cycles
_cycles.sync(engine.session)
def draw(engine, region, v3d, rv3d):
import _cycles
v3d = v3d.as_pointer()
rv3d = rv3d.as_pointer()
# draw render image
_cycles.draw(engine.session, v3d, rv3d)
def available_devices():
import _cycles
return _cycles.available_devices()
def with_osl():
import _cycles
return _cycles.with_osl
def with_network():
import _cycles
return _cycles.with_network
def system_info():
import _cycles
return _cycles.system_info()
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
from tempest.lib import decorators
from novaclient.tests.functional import base
from novaclient.tests.functional.v2.legacy import test_servers
from novaclient.v2 import shell
class TestServersBootNovaClient(test_servers.TestServersBootNovaClient):
"""Servers boot functional tests."""
COMPUTE_API_VERSION = "2.latest"
class TestServersListNovaClient(test_servers.TestServersListNovaClient):
"""Servers list functional tests."""
COMPUTE_API_VERSION = "2.latest"
class TestServerLockV29(base.ClientTestBase):
COMPUTE_API_VERSION = "2.9"
def _show_server_and_check_lock_attr(self, server, value):
output = self.nova("show %s" % server.id)
self.assertEqual(str(value),
self._get_value_from_the_table(output, "locked"))
def test_attribute_presented(self):
# prepare
server = self._create_server()
# testing
self._show_server_and_check_lock_attr(server, False)
self.nova("lock %s" % server.id)
self._show_server_and_check_lock_attr(server, True)
self.nova("unlock %s" % server.id)
self._show_server_and_check_lock_attr(server, False)
class TestServersDescription(base.ClientTestBase):
COMPUTE_API_VERSION = "2.19"
def _boot_server_with_description(self):
descr = "Some words about this test VM."
server = self._create_server(description=descr)
self.assertEqual(descr, server.description)
return server, descr
def test_create(self):
# Add a description to the tests that create a server
server, descr = self._boot_server_with_description()
output = self.nova("show %s" % server.id)
self.assertEqual(descr, self._get_value_from_the_table(output,
"description"))
def test_list_servers_with_description(self):
# Check that the description is returned as part of server details
# for a server list
server, descr = self._boot_server_with_description()
output = self.nova("list --fields description")
self.assertEqual(server.id,
self._get_column_value_from_single_row_table(
output, "ID"))
self.assertEqual(descr,
self._get_column_value_from_single_row_table(
output, "Description"))
@decorators.skip_because(bug="1694371")
def test_rebuild(self):
# Add a description to the tests that rebuild a server
server, descr = self._boot_server_with_description()
descr = "New description for rebuilt VM."
self.nova("rebuild --description '%s' %s %s" %
(descr, server.id, self.image.name))
shell._poll_for_status(
self.client.servers.get, server.id,
'rebuild', ['active'])
output = self.nova("show %s" % server.id)
self.assertEqual(descr, self._get_value_from_the_table(output,
"description"))
def test_remove_description(self):
# Remove description from server booted with it
server, descr = self._boot_server_with_description()
self.nova("update %s --description ''" % server.id)
output = self.nova("show %s" % server.id)
self.assertEqual("-", self._get_value_from_the_table(output,
"description"))
def test_add_remove_description_on_existing_server(self):
# Set and remove the description on an existing server
server = self._create_server()
descr = "Add a description for previously-booted VM."
self.nova("update %s --description '%s'" % (server.id, descr))
output = self.nova("show %s" % server.id)
self.assertEqual(descr, self._get_value_from_the_table(output,
"description"))
self.nova("update %s --description ''" % server.id)
output = self.nova("show %s" % server.id)
self.assertEqual("-", self._get_value_from_the_table(output,
"description"))
def test_update_with_description_longer_than_255_symbols(self):
# Negative case for description longer than 255 characters
server = self._create_server()
descr = ''.join(random.choice(string.ascii_letters)
for i in range(256))
output = self.nova("update %s --description '%s'" % (server.id, descr),
fail_ok=True, merge_stderr=True)
self.assertIn("ERROR (BadRequest): Invalid input for field/attribute"
" description. Value: %s. u\'%s\' is too long (HTTP 400)"
% (descr, descr), output)
class TestServersTagsV226(base.ClientTestBase):
COMPUTE_API_VERSION = "2.26"
def _boot_server_with_tags(self, tags=["t1", "t2"]):
uuid = self._create_server().id
self.client.servers.set_tags(uuid, tags)
return uuid
def test_show(self):
uuid = self._boot_server_with_tags()
output = self.nova("show %s" % uuid)
self.assertEqual('["t1", "t2"]', self._get_value_from_the_table(
output, "tags"))
def test_unicode_tag_correctly_displayed(self):
"""Regression test for bug #1669683.
List and dict fields with unicode cannot be correctly
displayed.
Ensure that once we fix this it doesn't regress.
"""
# create an instance with chinese tag
uuid = self._boot_server_with_tags(tags=["中文标签"])
output = self.nova("show %s" % uuid)
self.assertEqual('["中文标签"]', self._get_value_from_the_table(
output, "tags"))
def test_list(self):
uuid = self._boot_server_with_tags()
output = self.nova("server-tag-list %s" % uuid)
tags = self._get_list_of_values_from_single_column_table(
output, "Tag")
self.assertEqual(["t1", "t2"], tags)
def test_add(self):
uuid = self._boot_server_with_tags()
self.nova("server-tag-add %s t3" % uuid)
self.assertEqual(["t1", "t2", "t3"],
self.client.servers.tag_list(uuid))
def test_add_many(self):
uuid = self._boot_server_with_tags()
self.nova("server-tag-add %s t3 t4" % uuid)
self.assertEqual(["t1", "t2", "t3", "t4"],
self.client.servers.tag_list(uuid))
def test_set(self):
uuid = self._boot_server_with_tags()
self.nova("server-tag-set %s t3 t4" % uuid)
self.assertEqual(["t3", "t4"], self.client.servers.tag_list(uuid))
def test_delete(self):
uuid = self._boot_server_with_tags()
self.nova("server-tag-delete %s t2" % uuid)
self.assertEqual(["t1"], self.client.servers.tag_list(uuid))
def test_delete_many(self):
uuid = self._boot_server_with_tags()
self.nova("server-tag-delete %s t1 t2" % uuid)
self.assertEqual([], self.client.servers.tag_list(uuid))
def test_delete_all(self):
uuid = self._boot_server_with_tags()
self.nova("server-tag-delete-all %s" % uuid)
self.assertEqual([], self.client.servers.tag_list(uuid))
class TestServersAutoAllocateNetworkCLI(base.ClientTestBase):
COMPUTE_API_VERSION = '2.37'
def _find_network_in_table(self, table):
# Example:
# +-----------------+-----------------------------------+
# | Property | Value |
# +-----------------+-----------------------------------+
# | private network | 192.168.154.128 |
# +-----------------+-----------------------------------+
for line in table.split('\n'):
if '|' in line:
l_property, l_value = line.split('|')[1:3]
if ' network' in l_property.strip():
return ' '.join(l_property.strip().split()[:-1])
def test_boot_server_with_auto_network(self):
"""Tests that the CLI defaults to 'auto' when --nic isn't specified.
"""
# check to see if multiple networks are available because if so we
# have to skip this test as auto will fail with a 409 conflict as it's
# an ambiguous request and nova won't know which network to pick
if self.multiple_networks:
# we could potentially get around this by extending TenantTestBase
self.skipTest('multiple networks available')
server_info = self.nova('boot', params=(
'%(name)s --flavor %(flavor)s --poll '
'--image %(image)s ' % {'name': self.name_generate(),
'flavor': self.flavor.id,
'image': self.image.id}))
server_id = self._get_value_from_the_table(server_info, 'id')
self.addCleanup(self.wait_for_resource_delete,
server_id, self.client.servers)
self.addCleanup(self.client.servers.delete, server_id)
# get the server details to verify there is a network, we don't care
# what the network name is, we just want to see an entry show up
server_info = self.nova('show', params=server_id)
network = self._find_network_in_table(server_info)
self.assertIsNotNone(
network, 'Auto-allocated network not found: %s' % server_info)
def test_boot_server_with_no_network(self):
"""Tests that '--nic none' is honored.
"""
server_info = self.nova('boot', params=(
'%(name)s --flavor %(flavor)s --poll '
'--image %(image)s --nic none' %
{'name': self.name_generate(),
'flavor': self.flavor.id,
'image': self.image.id}))
server_id = self._get_value_from_the_table(server_info, 'id')
self.addCleanup(self.wait_for_resource_delete,
server_id, self.client.servers)
self.addCleanup(self.client.servers.delete, server_id)
# get the server details to verify there is not a network
server_info = self.nova('show', params=server_id)
network = self._find_network_in_table(server_info)
self.assertIsNone(
network, 'Unexpected network allocation: %s' % server_info)
class TestServersDetailsFlavorInfo(base.ClientTestBase):
COMPUTE_API_VERSION = '2.47'
def _validate_flavor_details(self, flavor_details, server_details):
# This is a mapping between the keys used in the flavor GET response
# and the keys used for the flavor information embedded in the server
# details.
flavor_key_mapping = {
"OS-FLV-EXT-DATA:ephemeral": "flavor:ephemeral",
"disk": "flavor:disk",
"extra_specs": "flavor:extra_specs",
"name": "flavor:original_name",
"ram": "flavor:ram",
"swap": "flavor:swap",
"vcpus": "flavor:vcpus",
}
for key in flavor_key_mapping:
flavor_val = self._get_value_from_the_table(
flavor_details, key)
server_flavor_val = self._get_value_from_the_table(
server_details, flavor_key_mapping[key])
if key is "swap" and flavor_val is "":
# "flavor-show" displays zero swap as empty string.
flavor_val = '0'
self.assertEqual(flavor_val, server_flavor_val)
def _setup_extra_specs(self, flavor_id):
extra_spec_key = "dummykey"
self.nova('flavor-key', params=('%(flavor)s set %(key)s=dummyval' %
{'flavor': flavor_id,
'key': extra_spec_key}))
unset_params = ('%(flavor)s unset %(key)s' %
{'flavor': flavor_id, 'key': extra_spec_key})
self.addCleanup(self.nova, 'flavor-key', params=unset_params)
def test_show(self):
self._setup_extra_specs(self.flavor.id)
uuid = self._create_server().id
server_output = self.nova("show %s" % uuid)
flavor_output = self.nova("flavor-show %s" % self.flavor.id)
self._validate_flavor_details(flavor_output, server_output)
def test_show_minimal(self):
uuid = self._create_server().id
server_output = self.nova("show --minimal %s" % uuid)
server_output_flavor = self._get_value_from_the_table(
server_output, 'flavor')
self.assertEqual(self.flavor.name, server_output_flavor)
def test_list(self):
self._setup_extra_specs(self.flavor.id)
self._create_server()
server_output = self.nova("list --fields flavor:disk")
# namespaced fields get reformatted slightly as column names
server_flavor_val = self._get_column_value_from_single_row_table(
server_output, 'flavor: Disk')
flavor_output = self.nova("flavor-show %s" % self.flavor.id)
flavor_val = self._get_value_from_the_table(flavor_output, 'disk')
self.assertEqual(flavor_val, server_flavor_val)
class TestInterfaceAttach(base.ClientTestBase):
COMPUTE_API_VERSION = '2.latest'
def test_interface_attach(self):
server = self._create_server()
output = self.nova("interface-attach --net-id %s %s" %
(self.network.id, server.id))
for key in ('ip_address', 'mac_addr', 'port_id', 'port_state'):
self._get_value_from_the_table(output, key)
self.assertEqual(
self.network.id,
self._get_value_from_the_table(output, 'net_id'))
class TestServeRebuildV274(base.ClientTestBase):
COMPUTE_API_VERSION = '2.74'
REBUILD_FIELDS = ["OS-DCF:diskConfig", "accessIPv4", "accessIPv6",
"adminPass", "created", "description",
"flavor", "hostId", "id", "image", "key_name",
"locked", "locked_reason", "metadata", "name",
"progress", "server_groups", "status", "tags",
"tenant_id", "trusted_image_certificates", "updated",
"user_data", "user_id"]
def test_rebuild(self):
server = self._create_server()
output = self.nova("rebuild %s %s" % (server.id, self.image.name))
for field in self.REBUILD_FIELDS:
self.assertIn(field, output)
class TestServeRebuildV275(TestServeRebuildV274):
COMPUTE_API_VERSION = '2.75'
REBUILD_FIELDS_V275 = ['OS-EXT-AZ:availability_zone', 'config_drive',
'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hypervisor_hostname',
'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:hostname',
'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-SRV-ATTR:ramdisk_id',
'OS-EXT-SRV-ATTR:reservation_id',
'OS-EXT-SRV-ATTR:root_device_name',
'host_status',
'OS-SRV-USG:launched_at',
'OS-SRV-USG:terminated_at',
'OS-EXT-STS:task_state', 'OS-EXT-STS:vm_state',
'OS-EXT-STS:power_state', 'security_groups',
'os-extended-volumes:volumes_attached']
REBUILD_FIELDS = TestServeRebuildV274.REBUILD_FIELDS + REBUILD_FIELDS_V275
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import collections
import collections.abc
import copy
import glob
import importlib
import itertools
import json
import os
import time
from bisect import bisect
from itertools import product
import demjson
import numpy as np
import torch
import yaml
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from torch_geometric.utils import remove_self_loops
def save_checkpoint(state, checkpoint_dir="checkpoints/"):
filename = os.path.join(checkpoint_dir, "checkpoint.pt")
torch.save(state, filename)
class Complete(object):
def __call__(self, data):
device = data.edge_index.device
row = torch.arange(data.num_nodes, dtype=torch.long, device=device)
col = torch.arange(data.num_nodes, dtype=torch.long, device=device)
row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)
col = col.repeat(data.num_nodes)
edge_index = torch.stack([row, col], dim=0)
edge_attr = None
if data.edge_attr is not None:
idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]
size = list(data.edge_attr.size())
size[0] = data.num_nodes * data.num_nodes
edge_attr = data.edge_attr.new_zeros(size)
edge_attr[idx] = data.edge_attr
edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
data.edge_attr = edge_attr
data.edge_index = edge_index
return data
def warmup_lr_lambda(current_epoch, optim_config):
"""Returns a learning rate multiplier.
    Until `warmup_epochs`, the learning rate increases linearly to `initial_lr`;
    after that it is multiplied by `lr_gamma` every time a milestone is crossed.
"""
if current_epoch <= optim_config["warmup_epochs"]:
alpha = current_epoch / float(optim_config["warmup_epochs"])
return optim_config["warmup_factor"] * (1.0 - alpha) + alpha
else:
idx = bisect(optim_config["lr_milestones"], current_epoch)
return pow(optim_config["lr_gamma"], idx)
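# Hedged worked example (added): the multiplier that warmup_lr_lambda yields
# for a hypothetical optimizer config.
if __name__ == "__main__":
    _example_optim_config = {
        "warmup_epochs": 10,
        "warmup_factor": 0.1,
        "lr_gamma": 0.1,
        "lr_milestones": [30, 60],
    }
    # halfway through warmup: 0.1 * (1 - 0.5) + 0.5 = 0.55
    print(warmup_lr_lambda(5, _example_optim_config))
    # one milestone (30) crossed: 0.1 ** 1 = 0.1
    print(warmup_lr_lambda(40, _example_optim_config))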
def print_cuda_usage():
print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024))
print(
"Max Memory Allocated:",
torch.cuda.max_memory_allocated() / (1024 * 1024),
)
print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024))
print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024))
def conditional_grad(dec):
"Decorator to enable/disable grad depending on whether force/energy predictions are being made"
# Adapted from https://stackoverflow.com/questions/60907323/accessing-class-property-as-decorator-argument
def decorator(func):
def cls_method(self, *args, **kwargs):
f = func
if self.regress_forces:
f = dec(func)
return f(self, *args, **kwargs)
return cls_method
return decorator
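# Hedged usage sketch (added): conditional_grad is meant to decorate a model
# method together with an inner grad-mode decorator such as
# torch.enable_grad(); the wrapped method then only enables gradient tracking
# when the instance also regresses forces. _ExampleModel is hypothetical.
class _ExampleModel(torch.nn.Module):
    def __init__(self, regress_forces=True):
        super().__init__()
        self.regress_forces = regress_forces

    @conditional_grad(torch.enable_grad())
    def forward(self, x):
        # gradients w.r.t. x are only tracked when regress_forces is True
        return (x ** 2).sum()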
def plot_histogram(data, xlabel="", ylabel="", title=""):
assert isinstance(data, list)
# Preset
fig = Figure(figsize=(5, 4), dpi=150)
canvas = FigureCanvas(fig)
ax = fig.gca()
# Plot
ax.hist(data, bins=20, rwidth=0.9, zorder=3)
# Axes
ax.grid(color="0.95", zorder=0)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
fig.tight_layout(pad=2)
# Return numpy array
canvas.draw()
image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image_from_plot = image_from_plot.reshape(
fig.canvas.get_width_height()[::-1] + (3,)
)
return image_from_plot
# Override the collation method in `pytorch_geometric.data.InMemoryDataset`
def collate(data_list):
keys = data_list[0].keys
data = data_list[0].__class__()
for key in keys:
data[key] = []
slices = {key: [0] for key in keys}
for item, key in product(data_list, keys):
data[key].append(item[key])
if torch.is_tensor(item[key]):
s = slices[key][-1] + item[key].size(
item.__cat_dim__(key, item[key])
)
elif isinstance(item[key], int) or isinstance(item[key], float):
s = slices[key][-1] + 1
else:
raise ValueError("Unsupported attribute type")
slices[key].append(s)
if hasattr(data_list[0], "__num_nodes__"):
data.__num_nodes__ = []
for item in data_list:
data.__num_nodes__.append(item.num_nodes)
for key in keys:
if torch.is_tensor(data_list[0][key]):
data[key] = torch.cat(
data[key], dim=data.__cat_dim__(key, data_list[0][key])
)
else:
data[key] = torch.tensor(data[key])
slices[key] = torch.tensor(slices[key], dtype=torch.long)
return data, slices
def add_edge_distance_to_graph(
batch,
device="cpu",
dmin=0.0,
dmax=6.0,
num_gaussians=50,
):
# Make sure x has positions.
if not all(batch.pos[0][:] == batch.x[0][-3:]):
batch.x = torch.cat([batch.x, batch.pos.float()], dim=1)
# First set computations to be tracked for positions.
batch.x = batch.x.requires_grad_(True)
# Then compute Euclidean distance between edge endpoints.
pdist = torch.nn.PairwiseDistance(p=2.0)
distances = pdist(
batch.x[batch.edge_index[0]][:, -3:],
batch.x[batch.edge_index[1]][:, -3:],
)
# Expand it using a gaussian basis filter.
gdf_filter = torch.linspace(dmin, dmax, num_gaussians)
var = gdf_filter[1] - gdf_filter[0]
gdf_filter, var = gdf_filter.to(device), var.to(device)
gdf_distances = torch.exp(
-((distances.view(-1, 1) - gdf_filter) ** 2) / var ** 2
)
# Reassign edge attributes.
batch.edge_weight = distances
batch.edge_attr = gdf_distances.float()
return batch
# Copied from https://github.com/facebookresearch/mmf/blob/master/mmf/utils/env.py#L89.
def setup_imports():
from ocpmodels.common.registry import registry
# First, check if imports are already setup
has_already_setup = registry.get("imports_setup", no_warning=True)
if has_already_setup:
return
# Automatically load all of the modules, so that
# they register with registry
root_folder = registry.get("ocpmodels_root", no_warning=True)
if root_folder is None:
root_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.join(root_folder, "..")
trainer_folder = os.path.join(root_folder, "trainers")
trainer_pattern = os.path.join(trainer_folder, "**", "*.py")
datasets_folder = os.path.join(root_folder, "datasets")
datasets_pattern = os.path.join(datasets_folder, "*.py")
model_folder = os.path.join(root_folder, "models")
model_pattern = os.path.join(model_folder, "*.py")
importlib.import_module("ocpmodels.common.meter")
files = (
glob.glob(datasets_pattern, recursive=True)
+ glob.glob(model_pattern, recursive=True)
+ glob.glob(trainer_pattern, recursive=True)
)
for f in files:
for key in ["/trainers", "/datasets", "/models"]:
if f.find(key) != -1:
splits = f.split(os.sep)
file_name = splits[-1]
module_name = file_name[: file_name.find(".py")]
importlib.import_module(
"ocpmodels.%s.%s" % (key[1:], module_name)
)
registry.register("imports_setup", True)
def create_config_dict(args):
overrides = {}
for arg in args:
arg = arg.strip("--")
key, val = arg.split("=")
overrides[key] = val
return overrides
def update_config(original, update):
"""
Recursively update a dict.
Parameters must be specified in original to be overwritten
"""
for basekey, baseval in original.items():
if isinstance(baseval, dict):
for key, val in baseval.items():
if key in update:
original[basekey][key] = demjson.decode(update[key])
return original
def build_config(args, args_override):
config = yaml.safe_load(open(args.config_yml, "r"))
# Load config from included files.
includes = config.get("includes", [])
if not isinstance(includes, list):
raise AttributeError(
"Includes must be a list, {} provided".format(type(includes))
)
for include in includes:
include_config = yaml.safe_load(open(include, "r"))
config.update(include_config)
if includes != []:
config.pop("includes")
# Check for overriden parameters.
if args_override != []:
overrides = create_config_dict(args_override)
config = update_config(config, overrides)
# Some other flags.
config["mode"] = args.mode
config["identifier"] = args.identifier
config["seed"] = args.seed
config["is_debug"] = args.debug
config["run_dir"] = args.run_dir
config["is_vis"] = args.vis
config["print_every"] = args.print_every
config["amp"] = args.amp
config["checkpoint"] = args.checkpoint
config["cpu"] = args.cpu
# Submit
config["submit"] = args.submit
# Distributed
config["local_rank"] = args.local_rank
config["distributed_port"] = args.distributed_port
config["world_size"] = args.num_nodes * args.num_gpus
config["distributed_backend"] = args.distributed_backend
return config
def create_grid(base_config, sweep_file):
def _flatten_sweeps(sweeps, root_key="", sep="."):
flat_sweeps = []
for key, value in sweeps.items():
new_key = root_key + sep + key if root_key else key
            if isinstance(value, collections.abc.MutableMapping):
flat_sweeps.extend(_flatten_sweeps(value, new_key).items())
else:
flat_sweeps.append((new_key, value))
return collections.OrderedDict(flat_sweeps)
def _update_config(config, keys, override_vals, sep="."):
for key, value in zip(keys, override_vals):
key_path = key.split(sep)
child_config = config
for name in key_path[:-1]:
child_config = child_config[name]
child_config[key_path[-1]] = value
return config
sweeps = yaml.safe_load(open(sweep_file, "r"))
flat_sweeps = _flatten_sweeps(sweeps)
keys = list(flat_sweeps.keys())
values = list(itertools.product(*flat_sweeps.values()))
configs = []
for i, override_vals in enumerate(values):
config = copy.deepcopy(base_config)
config = _update_config(config, keys, override_vals)
config["identifier"] = config["identifier"] + f"_run{i}"
configs.append(config)
return configs
def save_experiment_log(args, jobs, configs):
log_file = args.logdir / "exp" / time.strftime("%Y-%m-%d-%I-%M-%S%p.log")
log_file.parent.mkdir(exist_ok=True, parents=True)
with open(log_file, "w") as f:
for job, config in zip(jobs, configs):
print(
json.dumps(
{
"config": config,
"slurm_id": job.job_id,
"timestamp": time.strftime("%I:%M:%S%p %Z %b %d, %Y"),
}
),
file=f,
)
return log_file
def get_pbc_distances(
pos,
edge_index,
cell,
cell_offsets,
neighbors,
return_offsets=False,
return_distance_vec=False,
):
row, col = edge_index
distance_vectors = pos[row] - pos[col]
# correct for pbc
neighbors = neighbors.to(cell.device)
cell = torch.repeat_interleave(cell, neighbors, dim=0)
offsets = cell_offsets.float().view(-1, 1, 3).bmm(cell.float()).view(-1, 3)
distance_vectors += offsets
# compute distances
distances = distance_vectors.norm(dim=-1)
# redundancy: remove zero distances
nonzero_idx = torch.arange(len(distances))[distances != 0]
edge_index = edge_index[:, nonzero_idx]
distances = distances[nonzero_idx]
out = {
"edge_index": edge_index,
"distances": distances,
}
if return_distance_vec:
out["distance_vec"] = distance_vectors[nonzero_idx]
if return_offsets:
out["offsets"] = offsets[nonzero_idx]
return out
def radius_graph_pbc(data, radius, max_num_neighbors_threshold, device):
batch_size = len(data.natoms)
# position of the atoms
atom_pos = data.pos
# Before computing the pairwise distances between atoms, first create a list of atom indices to compare for the entire batch
num_atoms_per_image = data.natoms
num_atoms_per_image_sqr = (num_atoms_per_image ** 2).long()
# index offset between images
index_offset = (
torch.cumsum(num_atoms_per_image, dim=0) - num_atoms_per_image
)
index_offset_expand = torch.repeat_interleave(
index_offset, num_atoms_per_image_sqr
)
num_atoms_per_image_expand = torch.repeat_interleave(
num_atoms_per_image, num_atoms_per_image_sqr
)
# Compute a tensor containing sequences of numbers that range from 0 to num_atoms_per_image_sqr for each image
# that is used to compute indices for the pairs of atoms. This is a very convoluted way to implement
# the following (but 10x faster since it removes the for loop)
# for batch_idx in range(batch_size):
# batch_count = torch.cat([batch_count, torch.arange(num_atoms_per_image_sqr[batch_idx], device=device)], dim=0)
num_atom_pairs = torch.sum(num_atoms_per_image_sqr)
index_sqr_offset = (
torch.cumsum(num_atoms_per_image_sqr, dim=0) - num_atoms_per_image_sqr
)
index_sqr_offset = torch.repeat_interleave(
index_sqr_offset, num_atoms_per_image_sqr
)
atom_count_sqr = (
torch.arange(num_atom_pairs, device=device) - index_sqr_offset
)
# Compute the indices for the pairs of atoms (using division and mod)
    # If the systems get too large this approach could run into numerical precision issues
index1 = (
(atom_count_sqr // num_atoms_per_image_expand)
).long() + index_offset_expand
index2 = (
atom_count_sqr % num_atoms_per_image_expand
).long() + index_offset_expand
# Get the positions for each atom
pos1 = torch.index_select(atom_pos, 0, index1)
pos2 = torch.index_select(atom_pos, 0, index2)
# Tensor of unit cells. Assumes 9 cells in -1, 0, 1 offsets in the x and y dimensions
unit_cell = torch.tensor(
[
[-1, -1, 0],
[-1, 0, 0],
[-1, 1, 0],
[0, -1, 0],
[0, 0, 0],
[0, 1, 0],
[1, -1, 0],
[1, 0, 0],
[1, 1, 0],
],
device=device,
).float()
num_cells = len(unit_cell)
unit_cell_per_atom = unit_cell.view(1, num_cells, 3).repeat(
len(index2), 1, 1
)
unit_cell = torch.transpose(unit_cell, 0, 1)
unit_cell_batch = unit_cell.view(1, 3, num_cells).expand(
batch_size, -1, -1
)
# Compute the x, y, z positional offsets for each cell in each image
data_cell = torch.transpose(data.cell, 1, 2)
pbc_offsets = torch.bmm(data_cell, unit_cell_batch)
pbc_offsets_per_atom = torch.repeat_interleave(
pbc_offsets, num_atoms_per_image_sqr, dim=0
)
# Expand the positions and indices for the 9 cells
pos1 = pos1.view(-1, 3, 1).expand(-1, -1, num_cells)
pos2 = pos2.view(-1, 3, 1).expand(-1, -1, num_cells)
index1 = index1.view(-1, 1).repeat(1, num_cells).view(-1)
index2 = index2.view(-1, 1).repeat(1, num_cells).view(-1)
# Add the PBC offsets for the second atom
pos2 = pos2 + pbc_offsets_per_atom
# Compute the squared distance between atoms
atom_distance_sqr = torch.sum((pos1 - pos2) ** 2, dim=1)
atom_distance_sqr = atom_distance_sqr.view(-1)
# Remove pairs that are too far apart
mask_within_radius = torch.le(atom_distance_sqr, radius * radius)
# Remove pairs with the same atoms (distance = 0.0)
mask_not_same = torch.gt(atom_distance_sqr, 0.0001)
mask = torch.logical_and(mask_within_radius, mask_not_same)
index1 = torch.masked_select(index1, mask)
index2 = torch.masked_select(index2, mask)
unit_cell = torch.masked_select(
unit_cell_per_atom.view(-1, 3), mask.view(-1, 1).expand(-1, 3)
)
unit_cell = unit_cell.view(-1, 3)
num_atoms = len(data.pos)
num_neighbors = torch.zeros(num_atoms, device=device)
num_neighbors.index_add_(0, index1, torch.ones(len(index1), device=device))
num_neighbors = num_neighbors.long()
max_num_neighbors = torch.max(num_neighbors).long()
# Compute neighbors per image
_max_neighbors = copy.deepcopy(num_neighbors)
_max_neighbors[
_max_neighbors > max_num_neighbors_threshold
] = max_num_neighbors_threshold
_num_neighbors = torch.zeros(num_atoms + 1, device=device).long()
_natoms = torch.zeros(data.natoms.shape[0] + 1, device=device).long()
_num_neighbors[1:] = torch.cumsum(_max_neighbors, dim=0)
_natoms[1:] = torch.cumsum(data.natoms, dim=0)
num_neighbors_image = (
_num_neighbors[_natoms[1:]] - _num_neighbors[_natoms[:-1]]
)
# If max_num_neighbors is below the threshold, return early
if (
max_num_neighbors <= max_num_neighbors_threshold
or max_num_neighbors_threshold <= 0
):
return torch.stack((index2, index1)), unit_cell, num_neighbors_image
atom_distance_sqr = torch.masked_select(atom_distance_sqr, mask)
# Create a tensor of size [num_atoms, max_num_neighbors] to sort the distances of the neighbors.
# Fill with values greater than radius*radius so we can easily remove unused distances later.
distance_sort = torch.zeros(
num_atoms * max_num_neighbors, device=device
).fill_(radius * radius + 1.0)
# Create an index map to map distances from atom_distance_sqr to distance_sort
index_neighbor_offset = torch.cumsum(num_neighbors, dim=0) - num_neighbors
index_neighbor_offset_expand = torch.repeat_interleave(
index_neighbor_offset, num_neighbors
)
index_sort_map = (
index1 * max_num_neighbors
+ torch.arange(len(index1), device=device)
- index_neighbor_offset_expand
)
distance_sort.index_copy_(0, index_sort_map, atom_distance_sqr)
distance_sort = distance_sort.view(num_atoms, max_num_neighbors)
# Sort neighboring atoms based on distance
distance_sort, index_sort = torch.sort(distance_sort, dim=1)
# Select the max_num_neighbors_threshold neighbors that are closest
distance_sort = distance_sort[:, :max_num_neighbors_threshold]
index_sort = index_sort[:, :max_num_neighbors_threshold]
# Offset index_sort so that it indexes into index1
index_sort = index_sort + index_neighbor_offset.view(-1, 1).expand(
-1, max_num_neighbors_threshold
)
# Remove "unused pairs" with distances greater than the radius
mask_within_radius = torch.le(distance_sort, radius * radius)
index_sort = torch.masked_select(index_sort, mask_within_radius)
# At this point index_sort contains the index into index1 of the closest max_num_neighbors_threshold neighbors per atom
# Create a mask to remove all pairs not in index_sort
mask_num_neighbors = torch.zeros(len(index1), device=device).bool()
mask_num_neighbors.index_fill_(0, index_sort, True)
# Finally mask out the atoms to ensure each atom has at most max_num_neighbors_threshold neighbors
index1 = torch.masked_select(index1, mask_num_neighbors)
index2 = torch.masked_select(index2, mask_num_neighbors)
unit_cell = torch.masked_select(
unit_cell.view(-1, 3), mask_num_neighbors.view(-1, 1).expand(-1, 3)
)
unit_cell = unit_cell.view(-1, 3)
edge_index = torch.stack((index2, index1))
return edge_index, unit_cell, num_neighbors_image
def get_pruned_edge_idx(edge_index, num_atoms=None, max_neigh=1e9):
assert num_atoms is not None
# removes neighbors > max_neigh
# assumes neighbors are sorted in increasing distance
_nonmax_idx = []
for i in range(num_atoms):
        # int() guards against float defaults such as 1e9 being used as a slice bound
        idx_i = torch.arange(len(edge_index[1]))[(edge_index[1] == i)][: int(max_neigh)]
_nonmax_idx.append(idx_i)
_nonmax_idx = torch.cat(_nonmax_idx)
return _nonmax_idx
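
# Minimal illustrative check of get_pruned_edge_idx (hypothetical toy graph; per the
# comments above, edge_index[1] holds the "center" atom of each edge and the edges are
# assumed to be sorted by increasing distance within each atom):
#
#   edge_index = torch.tensor([[1, 2, 3, 0, 2],
#                              [0, 0, 0, 1, 1]])
#   keep = get_pruned_edge_idx(edge_index, num_atoms=2, max_neigh=2)
#   pruned_edge_index = edge_index[:, keep]   # at most 2 neighbors kept per atom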
|
V = int(input())
n = int(input())  # count of values on the next line (read but not used)
for i, cur in enumerate(input().split()):
    if V == int(cur):
        print(i)
|
"""init db
Revision ID: 9912396391c9
Revises:
Create Date: 2020-08-02 01:10:17.388059
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9912396391c9'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
genre_table = op.create_table('genres',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('external_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_genres_external_id'), 'genres', ['external_id'], unique=False)
op.create_index(op.f('ix_genres_id'), 'genres', ['id'], unique=False)
op.create_table('movies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('external_id', sa.Integer(), nullable=False),
sa.Column('popularity', sa.Float(), nullable=True),
sa.Column('vote_count', sa.Integer(), nullable=True),
sa.Column('video', sa.Boolean(), nullable=True),
sa.Column('poster_path', sa.String(), nullable=True),
sa.Column('adult', sa.Boolean(), nullable=True),
sa.Column('backdrop_path', sa.String(), nullable=True),
sa.Column('original_language', sa.String(), nullable=True),
sa.Column('original_title', sa.String(), nullable=True),
sa.Column('title', sa.String(), nullable=False),
sa.Column('vote_average', sa.Float(), nullable=True),
sa.Column('overview', sa.String(), nullable=True),
sa.Column('release_date', sa.Date(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_movies_external_id'), 'movies', ['external_id'], unique=False)
op.create_index(op.f('ix_movies_id'), 'movies', ['id'], unique=False)
op.create_table('movie_genres',
sa.Column('movie_id', sa.Integer(), nullable=True),
sa.Column('genre_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['genre_id'], ['genres.id'], ),
sa.ForeignKeyConstraint(['movie_id'], ['movies.id'], )
)
# All the movie genres from the Movie DB API.
op.bulk_insert(
genre_table,
[
{
"external_id": 28,
"name": "Action"
},
{
"external_id": 12,
"name": "Adventure"
},
{
"external_id": 16,
"name": "Animation"
},
{
"external_id": 35,
"name": "Comedy"
},
{
"external_id": 80,
"name": "Crime"
},
{
"external_id": 99,
"name": "Documentary"
},
{
"external_id": 18,
"name": "Drama"
},
{
"external_id": 10751,
"name": "Family"
},
{
"external_id": 14,
"name": "Fantasy"
},
{
"external_id": 36,
"name": "History"
},
{
"external_id": 27,
"name": "Horror"
},
{
"external_id": 10402,
"name": "Music"
},
{
"external_id": 9648,
"name": "Mystery"
},
{
"external_id": 10749,
"name": "Romance"
},
{
"external_id": 878,
"name": "Science Fiction"
},
{
"external_id": 10770,
"name": "TV Movie"
},
{
"external_id": 53,
"name": "Thriller"
},
{
"external_id": 10752,
"name": "War"
},
{
"external_id": 37,
"name": "Western"
}
]
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('movie_genres')
op.drop_index(op.f('ix_movies_id'), table_name='movies')
op.drop_index(op.f('ix_movies_external_id'), table_name='movies')
op.drop_table('movies')
op.drop_index(op.f('ix_genres_id'), table_name='genres')
op.drop_index(op.f('ix_genres_external_id'), table_name='genres')
op.drop_table('genres')
# ### end Alembic commands ###
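
# Typical invocation of this migration through the standard Alembic CLI (assumes an
# alembic.ini / env.py configured to point at this versions directory and a database URL):
#   alembic upgrade head       # creates the tables and seeds the genre rows
#   alembic downgrade -1       # reverts this revision via downgrade()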
|
try:
import MySQLdb
except ImportError:
import pymysql as MySQLdb
import os
import psycopg2
import pymssql
import sqlite3
# Sorry, DB_CONNECTIONS has to be reinitialized, hence it is defined in user.py
TERMINAL_TOKENS = {
'psql': ["'", '$$'],
'mssql': ["'"]
}
|
"""
Description:
Base classes to quickly derive other objects coming from JSON
Contributors:
- Patrick Hennessy
"""
import enum
from datetime import datetime
import time
import dateparser
class ModelMissingRequiredKeyError(Exception):
pass
class ModelValidationError(Exception):
pass
class ImmutableFieldError(Exception):
pass
class Model(object):
"""
Base class for data bound objects
    Allows consumers to instantiate new instances from json (marshaling)
    Able to serialize back to json from its object format
__repr_keys__ allows consumers to designate what the repr will print out
"""
__repr_keys__ = []
__immutable_fields__ = []
@classmethod
def marshal(cls, data):
"""
Create a new instance of the class from the JSON data passed in
"""
new_obj = cls()
new_obj.__fields__ = {}
new_obj.__immutable_fields__ = []
# Get all fields from super classes
fields = {}
for base in cls.__bases__:
fields.update(base.__dict__.items())
fields.update(cls.__dict__.items())
for field_name, field in fields.items():
if not isinstance(field, Field):
continue
new_obj.__fields__[field_name] = field
json_key = field.json_key if field.json_key else field_name
json_data = data.get(json_key, None)
if json_data is None and field.required:
raise ModelMissingRequiredKeyError(f"{cls.__name__} model is missing required key {json_key}")
elif json_data is None:
setattr(new_obj, field_name, field.default)
else:
attr = field.marshal(json_data)
setattr(new_obj, field_name, attr)
if field.immutable:
new_obj.__immutable_fields__.append(field_name)
return new_obj
def remarshal(self, data):
"""
Apply updates to existing fields given JSON data
Fails if an update comes to an immutable field
"""
for field_name, field in self.__fields__.items():
if not isinstance(field, Field):
continue
json_key = field.json_key if field.json_key else field_name
if json_key not in data.keys():
continue
json_data = data[json_key]
attr = field.marshal(json_data)
if field.immutable and not getattr(self, field_name) == attr:
raise ImmutableFieldError(f"Field {field_name} cannot be updated")
setattr(self, field_name, attr)
def serialize(self):
"""
Turn object back into a JSON-ified object
"""
dct = {}
for item, field in self.__fields__.items():
attr = getattr(self, item)
key = field.json_key if field.json_key != item and field.json_key is not None else item
if isinstance(attr, Model):
dct[key] = attr.serialize()
elif isinstance(attr, Enum):
dct[key] = attr.value
elif isinstance(attr, list):
dct[key] = []
for child in attr:
if isinstance(child, Model):
dct[key].append(child.serialize())
elif isinstance(child, Enum):
dct[key].append(child.value)
else:
dct[key].append(child)
else:
dct[key] = attr
return dct
def merge(self, new_obj, preserve=None):
"""
Sometimes events come from the websocket that will
have updates to existing model objects. This method
will get new values from the new object but will
not update any fields in preserve
"""
for field_name in self.__fields__:
if preserve and field_name in preserve:
continue
setattr(self, field_name, getattr(new_obj, field_name))
def __repr__(self):
"""
Pretty repr that allows models to specify keys to use
"""
classname = f"{type(self).__name__}"
items = []
for key in self.__repr_keys__:
value = str(getattr(self, key))
items.append(f"{key}=\"{value}\"")
if len(items) > 0:
return "{}({})".format(classname, ", ".join(items))
else:
return "{}()".format(classname)
def __setattr__(self, name, value):
if name in self.__immutable_fields__:
raise ImmutableFieldError(f"Field \"{name}\" is immutable, cannot be changed")
else:
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in self.__immutable_fields__:
raise ImmutableFieldError(f"Field \"{name}\" is immutable, cannot be deleted")
else:
return object.__delattr__(self, name)
class Field(object):
"""
    Counterpart class for Models, instructing the model how to consume json data
Includes a nice repr
"""
def __init__(self, typ, default=None, required=False, json_key=None, max_length=-1, immutable=False, nullable=True):
self.type = typ
self.default = default
self.required = required
self.json_key = json_key
self.max_length = max_length
self.immutable = immutable
self.nullable = nullable
def __repr__(self):
if self.required is True:
return f"Field({self.type.__name__}, required=True)"
else:
return f"Field({self.type.__name__})"
def marshal(self, data):
# Recursively marshal types of other models
if issubclass(self.type, Model):
return self.type.marshal(data)
else:
# Check if data is able to be None type
if data is None:
if self.nullable is True:
return None
else:
raise ModelValidationError(f"Field cannot be nullified")
# Cast it to intended type
value = self.type(data)
# Validate field
if not self.max_length == -1 and len(value) > self.max_length:
raise ModelValidationError("Length of input is too long")
return value
class ListField(Field):
"""
Subclass of Field that allows one to create a list of some primitive
"""
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
self.default = kwargs.get('default', SearchableList())
def marshal(self, data):
if not self.max_length == -1 and len(data) > self.max_length:
raise ModelValidationError("Length of input is too long")
if not isinstance(data, list):
raise ModelValidationError("Input data is not of type list")
ret_list = SearchableList()
for item in data:
if issubclass(self.type, Model):
ret_list.append(self.type.marshal(item))
else:
ret_list.append(self.type(item))
return ret_list
class Enum(enum.Enum):
"""
A nice repr for enums
"""
def __repr__(self):
return f"{self.__class__.__name__}.{self._name_}"
class Snowflake(str):
def __init__(self, value):
if value and isinstance(value, str):
value = int(value)
self.timestamp = int(((value >> 22) + 1420070400000) / 1000)
self.internal_worker_id = (value & 0x3E0000) >> 17
self.internal_process_id = (value & 0x1F000) >> 12
self.increment = value & 0xFFF
self.value = value
def __str__(self):
return str(self.value)
def __hash__(self):
return self.value
def __repr__(self):
return str(self.value)
@property
def datetime(self):
return datetime.fromtimestamp(self.timestamp)
class Timestamp():
def __init__(self, iso_date):
self.datetime = dateparser.parse(iso_date)
def __repr__(self):
return f"{self.__class__.__name__}({self.timestamp})"
@property
def timestamp(self):
return int(self.datetime.timestamp())
@classmethod
def from_unix(cls, ts):
dt = datetime.fromtimestamp(ts)
iso_date = str(dt.isoformat()) + ":.0" + time.strftime('%z')
return cls(iso_date)
class SearchableList(list):
"""
Subclass of List that allows for Mongo-esque querying of contents
Example:
users.find(name="Pat")
users.filter(lambda user: user.height > 5)
"""
def find(self, *args, **kwargs):
for item in self.__iter__():
for key, value in kwargs.items():
attr = getattr(item, key, None)
attr_type = type(attr)
if issubclass(attr_type, (int, bool, str, float)):
if (attr == attr_type(value)) is False:
break
else:
return item
return None
def filter(self, expression):
return list(filter(expression, self.__iter__()))
def upsert(self, new_item):
for index, item in enumerate(self):
if item == new_item:
self[index] = new_item
break
else:
self.append(new_item)
class SearchableDict(dict):
"""
Subclass of Dict that allows for Mongo-esque querying of contents
Example:
users.find(name="Pat")
users.filter(lambda user: user.height > 5)
"""
def find(self, *args, **kwargs):
for _, item in self.items():
for key, value in kwargs.items():
attr = getattr(item, key, None)
attr_type = type(attr)
if issubclass(attr_type, (int, bool, str, float)):
if (attr == attr_type(value)) is False:
break
else:
return item
return None
def filter(self, expression):
for _, item in self.items():
if expression(item) is True:
yield item
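
# Minimal usage sketch of the marshaling layer above. The User model, its fields and
# the sample payload are purely illustrative and not part of this module.
class User(Model):
    __repr_keys__ = ["id", "name"]

    id = Field(Snowflake, required=True, immutable=True)
    name = Field(str, required=True, max_length=32)
    tags = ListField(str)


if __name__ == "__main__":
    payload = {"id": "177888205536886784", "name": "Pat", "tags": ["admin"]}
    user = User.marshal(payload)          # build the object from JSON-like data
    print(user)                           # -> User(id="...", name="Pat")
    print(user.serialize())               # serialize back into a dict
    user.remarshal({"name": "Patrick"})   # mutable fields can be updated in place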
|
"""Configuration for the package is handled in this wrapper for confuse."""
import argparse
from pathlib import Path
from typing import Union
import confuse
from pandas_profiling.utils.paths import get_config_default
class Config(object):
"""This is a wrapper for the python confuse package, which handles setting and getting configuration variables via
various ways (notably via argparse and kwargs).
"""
config = None
"""The confuse.Configuration object."""
def __init__(self):
"""The config constructor should be called only once."""
if self.config is None:
self.clear()
else:
self.set_file(str(get_config_default()))
def set_file(self, file_name: Union[str, Path]) -> None:
"""
Set the config from a file
Args:
file_name: file name
"""
if self.config is not None:
self.config.set_file(str(file_name))
def set_args(self, namespace: argparse.Namespace, dots: bool) -> None:
"""
Set config variables based on the argparse Namespace object.
Args:
namespace: Dictionary or Namespace to overlay this config with. Supports nested Dictionaries and Namespaces.
dots: If True, any properties on namespace that contain dots (.) will be broken down into child dictionaries.
"""
if self.config is not None:
self.config.set_args(namespace, dots)
def _set_kwargs(self, reference, values: dict):
"""Helper function to set config variables based on kwargs."""
for key, value in values.items():
if key in reference:
                if isinstance(value, dict):
self._set_kwargs(reference[key], value)
else:
reference[key].set(value)
else:
raise ValueError(f'Config parameter "{key}" does not exist.')
_shorthands = {
"samples": {"head": 0, "tail": 0},
"duplicates": {"head": 0},
"interactions": {"targets": [], "continuous": False},
"missing_diagrams": {
"bar": False,
"matrix": False,
"heatmap": False,
"dendrogram": False,
},
"correlations": {
"pearson": {"calculate": False},
"spearman": {"calculate": False},
"kendall": {"calculate": False},
"phi_k": {"calculate": False},
"cramers": {"calculate": False},
},
}
def _handle_shorthands(self, kwargs):
for key, value in self._shorthands.items():
if key in kwargs and kwargs[key] is None:
kwargs[key] = value
return kwargs
def _handle_shorthand(self, key, value):
if key in self._shorthands and value is None:
return self._shorthands[key]
else:
return value
def set_kwargs(self, kwargs) -> None:
"""
Helper function to set config variables based on kwargs.
Args:
kwargs: the arguments passed to the .profile_report() function
"""
kwargs = self._handle_shorthands(kwargs)
self._set_kwargs(self.config, kwargs)
def __getitem__(self, item):
return self.config[item]
def __setitem__(self, key, value):
value = self._handle_shorthand(key, value)
self.config[key].set(value)
def dump(self):
return self.config.dump()
def update(self, other):
if not isinstance(other, Config):
raise ValueError("Can only update config from a config object")
self.config = other.config
def clear(self):
self.config = confuse.Configuration("PandasProfiling", __name__, read=False)
self.set_file(str(get_config_default()))
@property
def is_default(self):
default_config = Config()
return self == default_config
def __eq__(self, other):
return isinstance(other, Config) and self.dump() == other.dump()
config = Config()
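
# Minimal usage sketch (commented out; the keys below come from the package's default
# config file and are shown purely for illustration):
#
#   cfg = Config()
#   cfg["samples"] = None                      # shorthand: expands to {"head": 0, "tail": 0}
#   cfg.set_kwargs({"samples": {"head": 5}})   # unknown keys raise ValueError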
|
"""
Functions to read and write ASCII model (.dat) files used by SPECFEM2D
"""
import os
import numpy as np
from glob import glob
from shutil import copyfile
from seisflows3.tools.tools import iterable
def read_slice(path, parameters, iproc):
"""
Reads SPECFEM model slice(s) based on .dat ASCII files
:type path: str
:param path: path to the database files
:type parameters: str
:param parameters: parameters to read, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to read
:rtype: list of np.array
:return: list of arrays corresponding to model parameters in given order
"""
filename = _get_filename(path, iproc)
available_parameters = _get_available_parameters(filename)
model = np.loadtxt(filename).T
vals = []
for key in iterable(parameters):
vals += [model[available_parameters.index(key)]]
return vals
def write_slice(data, path, parameters, iproc):
"""
Writes SPECFEM model slice
    !!! This won't work because we need access to the spatial components that
    !!! are only available in the model
:type data: seisflows.Container
:param data: data to be written to a slice
:type path: str
:param path: path to the database files
:type parameters: str
:param parameters: parameters to write, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to write
"""
for key in iterable(parameters):
filename = os.path.join(path, f"proc{int(iproc):06d}_{key}.bin")
_write(data, filename)
def copy_slice(src, dst, iproc, parameter):
"""
Copies SPECFEM model slice
:type src: str
:param src: source location to copy slice from
:type dst: str
:param dst: destination location to copy slice to
:type parameter: str
:param parameter: parameters to copy, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to copy
"""
filename = os.path.basename(_get_filename(src, iproc))
copyfile(os.path.join(src, filename),
os.path.join(dst, filename))
def _get_filename(path, iproc):
"""
ASCII .dat files list the available parameters in the fileid, meaning
there is no standard format for retrieving files. Use glob to search for
the file based on file extension.
:type path: str
:param path: path to the database files
:type iproc: int
:param iproc: processor/slice number to read
:rtype: str
:return: filename of the model
"""
filename_glob = os.path.join(path, f"proc{int(iproc):06d}_*.dat")
filename = glob(filename_glob)
    assert len(filename) == 1, \
        f"Expected only one .dat file, found {len(filename)}"
return filename[0]
def _get_available_parameters(filename):
"""
    The available parameters are listed in the file name. Split off the
    unnecessary text and return the listed parameters.
:type filename: str
:param filename: filename to check parameters from
:rtype: list
:return: list of parameters from the file id
"""
fid = os.path.splitext(os.path.basename(filename))[0]
_, *available_parameters = fid.split("_")
return available_parameters
def _write(v, filename):
"""
Writes Fortran style binary files
Data are written as single precision floating point numbers
"""
n = np.array([4 * len(v)], dtype='int32')
v = np.array(v, dtype='float32')
with open(filename, 'wb') as file:
n.tofile(file)
v.tofile(file)
n.tofile(file)
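
# Quick illustrative check of the filename-parsing helper above (the file id is
# hypothetical; real .dat files are produced by SPECFEM2D):
if __name__ == "__main__":
    example_file = "DATA/proc000000_x_z_vp_vs.dat"
    print(_get_available_parameters(example_file))  # -> ['x', 'z', 'vp', 'vs']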
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class UpdateServiceNotification(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.Update`.
Details:
- Layer: ``117``
- ID: ``0xebe46819``
Parameters:
type: ``str``
message: ``str``
media: :obj:`MessageMedia <pyrogram.raw.base.MessageMedia>`
entities: List of :obj:`MessageEntity <pyrogram.raw.base.MessageEntity>`
popup (optional): ``bool``
inbox_date (optional): ``int`` ``32-bit``
"""
__slots__: List[str] = ["type", "message", "media", "entities", "popup", "inbox_date"]
ID = 0xebe46819
QUALNAME = "types.UpdateServiceNotification"
def __init__(self, *, type: str, message: str, media: "raw.base.MessageMedia", entities: List["raw.base.MessageEntity"], popup: Union[None, bool] = None, inbox_date: Union[None, int] = None) -> None:
self.type = type # string
self.message = message # string
self.media = media # MessageMedia
self.entities = entities # Vector<MessageEntity>
self.popup = popup # flags.0?true
self.inbox_date = inbox_date # flags.1?int
@staticmethod
def read(data: BytesIO, *args: Any) -> "UpdateServiceNotification":
flags = Int.read(data)
popup = True if flags & (1 << 0) else False
inbox_date = Int.read(data) if flags & (1 << 1) else None
type = String.read(data)
message = String.read(data)
media = TLObject.read(data)
entities = TLObject.read(data)
return UpdateServiceNotification(type=type, message=message, media=media, entities=entities, popup=popup, inbox_date=inbox_date)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 0) if self.popup is not None else 0
flags |= (1 << 1) if self.inbox_date is not None else 0
data.write(Int(flags))
if self.inbox_date is not None:
data.write(Int(self.inbox_date))
data.write(String(self.type))
data.write(String(self.message))
data.write(self.media.write())
data.write(Vector(self.entities))
return data.getvalue()
|
# -*- coding=utf-8 -*-
import datetime
import pathlib
import os
import re
import sys
import invoke
from parver import Version
from towncrier._builder import (
find_fragments, render_fragments, split_fragments
)
from towncrier._settings import load_config
from pipenv.__version__ import __version__
from pipenv.vendor.vistir.contextmanagers import temp_environ
from .vendoring import _get_git_root, drop_dir
VERSION_FILE = 'pipenv/__version__.py'
ROOT = pathlib.Path(".").parent.parent.absolute()
PACKAGE_NAME = "pipenv"
def log(msg):
print('[release] %s' % msg)
def get_version_file(ctx):
return _get_git_root(ctx).joinpath(VERSION_FILE)
def find_version(ctx):
version_file = get_version_file(ctx).read_text()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_history_file(ctx):
return _get_git_root(ctx).joinpath('HISTORY.txt')
def get_dist_dir(ctx):
return _get_git_root(ctx) / 'dist'
def get_build_dir(ctx):
return _get_git_root(ctx) / 'build'
def _render_log():
"""Totally tap into Towncrier internals to get an in-memory result.
"""
config = load_config(ROOT)
definitions = config['types']
fragments, fragment_filenames = find_fragments(
pathlib.Path(config['directory']).absolute(),
config['sections'],
None,
definitions,
)
rendered = render_fragments(
pathlib.Path(config['template']).read_text(encoding='utf-8'),
config['issue_format'],
split_fragments(fragments, definitions),
definitions,
config['underlines'][1:],
False, # Don't add newlines to wrapped text.
)
return rendered
@invoke.task
def release(ctx, dry_run=False):
drop_dist_dirs(ctx)
bump_version(ctx, dry_run=dry_run)
version = find_version(ctx)
tag_content = _render_log()
if dry_run:
ctx.run('towncrier --draft > CHANGELOG.draft.rst')
log('would remove: news/*')
log('would remove: CHANGELOG.draft.rst')
log(f'Would commit with message: "Release v{version}"')
else:
ctx.run('towncrier')
ctx.run("git add CHANGELOG.rst news/")
ctx.run("git rm CHANGELOG.draft.rst")
ctx.run(f'git commit -m "Release v{version}"')
tag_content = tag_content.replace('"', '\\"')
if dry_run:
log(f"Generated tag content: {tag_content}")
markdown = ctx.run("pandoc CHANGELOG.draft.rst -f rst -t markdown", hide=True).stdout.strip()
content = clean_mdchangelog(ctx, markdown)
log(f"would generate markdown: {content}")
else:
generate_markdown(ctx)
clean_mdchangelog(ctx)
ctx.run(f'git tag -a v{version} -m "Version v{version}\n\n{tag_content}"')
build_dists(ctx)
if dry_run:
dist_pattern = f'{PACKAGE_NAME.replace("-", "[-_]")}-*'
artifacts = list(ROOT.joinpath('dist').glob(dist_pattern))
filename_display = '\n'.join(f' {a}' for a in artifacts)
log(f"Would upload dists: {filename_display}")
else:
upload_dists(ctx)
bump_version(ctx, dev=True)
def drop_dist_dirs(ctx):
log('Dropping Dist dir...')
drop_dir(get_dist_dir(ctx))
log('Dropping build dir...')
drop_dir(get_build_dir(ctx))
@invoke.task
def build_dists(ctx):
drop_dist_dirs(ctx)
for py_version in ['3.6', '2.7']:
env = {'PIPENV_PYTHON': py_version}
with ctx.cd(ROOT.as_posix()), temp_environ():
executable = ctx.run("python -c 'import sys; print(sys.executable)'", hide=True).stdout.strip()
log('Building sdist using %s ....' % executable)
os.environ["PIPENV_PYTHON"] = py_version
ctx.run('pipenv install --dev', env=env)
ctx.run('pipenv run pip install -e . --upgrade --upgrade-strategy=eager', env=env)
log('Building wheel using python %s ....' % py_version)
if py_version == '3.6':
ctx.run('pipenv run python setup.py sdist bdist_wheel', env=env)
else:
ctx.run('pipenv run python setup.py bdist_wheel', env=env)
@invoke.task(build_dists)
def upload_dists(ctx, repo="pypi"):
dist_pattern = f'{PACKAGE_NAME.replace("-", "[-_]")}-*'
artifacts = list(ROOT.joinpath('dist').glob(dist_pattern))
filename_display = '\n'.join(f' {a}' for a in artifacts)
print(f'[release] Will upload:\n{filename_display}')
try:
input('[release] Release ready. ENTER to upload, CTRL-C to abort: ')
except KeyboardInterrupt:
print('\nAborted!')
return
arg_display = ' '.join(f'"{n}"' for n in artifacts)
ctx.run(f'twine upload --repository="{repo}" {arg_display}')
@invoke.task
def generate_markdown(ctx):
log('Generating markdown from changelog...')
ctx.run('pandoc CHANGELOG.rst -f rst -t markdown -o CHANGELOG.md')
@invoke.task
def generate_changelog(ctx, commit=False, draft=False):
log('Generating changelog...')
if draft:
commit = False
log('Writing draft to file...')
ctx.run('towncrier --draft > CHANGELOG.draft.rst')
else:
ctx.run('towncrier')
if commit:
log('Committing...')
ctx.run('git add CHANGELOG.rst')
ctx.run('git rm CHANGELOG.draft.rst')
ctx.run('git commit -m "Update changelog."')
@invoke.task
def clean_mdchangelog(ctx, content=None):
changelog = None
if not content:
changelog = _get_git_root(ctx) / "CHANGELOG.md"
content = changelog.read_text()
content = re.sub(r"([^\n]+)\n?\s+\[[\\]+(#\d+)\]\(https://github\.com/pypa/[\w\-]+/issues/\d+\)", r"\1 \2", content, flags=re.MULTILINE)
if changelog:
changelog.write_text(content)
else:
return content
@invoke.task
def tag_version(ctx, push=False):
version = find_version(ctx)
version = Version.parse(version)
log('Tagging revision: v%s' % version.normalize())
ctx.run('git tag v%s' % version.normalize())
if push:
log('Pushing tags...')
ctx.run('git push origin master')
ctx.run('git push --tags')
@invoke.task
def bump_version(ctx, dry_run=False, dev=False, pre=False, tag=None, commit=False):
current_version = Version.parse(__version__)
today = datetime.date.today()
tomorrow = today + datetime.timedelta(days=1)
next_month = datetime.date.today().replace(month=today.month+1, day=1)
next_year = datetime.date.today().replace(year=today.year+1, month=1, day=1)
if pre and not tag:
print('Using "pre" requires a corresponding tag.')
return
if not (dev or pre or tag):
new_version = current_version.replace(release=today.timetuple()[:3]).clear(pre=True, dev=True)
if pre and dev:
raise RuntimeError("Can't use 'pre' and 'dev' together!")
if dev or pre:
new_version = current_version.replace(release=tomorrow.timetuple()[:3]).clear(pre=True, dev=True)
if dev:
new_version = new_version.bump_dev()
else:
new_version = new_version.bump_pre(tag=tag)
log('Updating version to %s' % new_version.normalize())
version = find_version(ctx)
log('Found current version: %s' % version)
if dry_run:
log('Would update to: %s' % new_version.normalize())
else:
log('Updating to: %s' % new_version.normalize())
version_file = get_version_file(ctx)
file_contents = version_file.read_text()
version_file.write_text(file_contents.replace(version, str(new_version.normalize())))
if commit:
ctx.run('git add {0}'.format(version_file.as_posix()))
log('Committing...')
ctx.run('git commit -s -m "Bumped version."')
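
# Typical invocation sketch (through the invoke CLI; the exact task and flag names
# depend on how this module is registered in the project's task collection):
#   invoke release --dry-run
#   invoke bump-version --dev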
|
import sys
from queue import Queue
import random
import tensorflow as tf
import numpy as np
import pandas as pd
import scipy.signal
import gym
#FIXME: move these to the net
#some quick wrapper methods for the state
def process_state(state):
#pad state if 1d with odd number of observations
dims = len(state.shape)
state = np.asarray(state, dtype=np.float32)
#handle rgb inputs
if dims == 3:
#convert rgb to greyscale
r, g, b = state[:, :, 0], state[:, :, 1], state[:, :, 2]
state = 0.2989 * r + 0.5870 * g + 0.1140 * b
state = state.reshape(state.shape + (1,))
#handle list of observations
elif dims == 1:
#convert to a 2d square 'image'
if not state.shape[0] % 2 == 0:
state = np.append(state, 0.0) #pad
w = int(state.shape[0] / 2)
state = state.reshape((w, w, 1))
#error for any unsupported sizes
elif dims < 1 or dims > 3:
print('error: state size unsupported: %s' % dims)
sys.exit(1)
#downsample to ?x?
#state = state[::2, ::2]
return state
def get_initial_state(env):
return process_state(env.reset())
def get_num_actions(env):
return env.action_space.n
def get_successor_state(env, action):
next_state, reward, done, _ = env.step(action)
return process_state(next_state), reward, done
#the prediction model
class A3C_Net(object):
def __init__(self, env, scope, sess, path='', seed=42, batchsize=None):
self.path = path
self.seed = seed
self.scope = scope
self.sess = sess
self.env = env
#trained for x batches
self.steps = 0
#set seeds
tf.set_random_seed(self.seed)
random.seed(self.seed)
#threadsafe queue
self.update_queue = Queue()
#spaceinvaders input is (210, 160, 3)
height, width, channels = get_initial_state(env).shape
n_actions = get_num_actions(env)
#ensure local copies of the net
with tf.name_scope(self.scope):
#preprocess raw inputs
with tf.name_scope('preprocess_input'):
#rgb input to square dimensions
self.state_in = tf.placeholder(tf.float32,
[batchsize, height, width, channels],
name='state_in')
dim = height if height > width else width
state_square = tf.image.resize_image_with_crop_or_pad(
self.state_in, dim, dim)
#action input to onehot
self.action_in = tf.placeholder(tf.int32, [batchsize],
name='action_in')
action_in = tf.one_hot(self.action_in, n_actions)
#reward input
self.reward_in = tf.placeholder(tf.float32, [batchsize],
name='reward_in')
#advantage input
self.advantage_in = tf.placeholder(tf.float32, [batchsize],
name='advantage_in')
'''
#3x3 conv2d, relu, 2x2 maxpool
with tf.name_scope('conv_pool'):
#filter shape = [height, width, in_channels,
#out_channels]
out_channels = 32 #FIXME: out_channels hardcoded
filter_shape = [3, 3, channels, out_channels]
conv_w = tf.Variable(tf.truncated_normal(filter_shape,
stddev=0.1), name='weight')
conv_b = tf.Variable(tf.constant(0.1,
shape=[out_channels]), name='bias')
conv = tf.nn.conv2d(state_square, conv_w,
strides=[1,1,1,1], padding='SAME')
relu = tf.nn.relu(conv + conv_b)
pool = tf.nn.max_pool(relu, ksize=[1,2,2,1],
strides=[1,2,2,1], padding='SAME')
'''
#FIXME: add dynamic lstm?
#fully connected with dropout
with tf.name_scope('dense_dropout'):
#flatten input
flat = tf.contrib.layers.flatten(state_square)
#FIXME: n hardcoded
n = 512
w_shape = [flat.get_shape()[-1].value, n]
fc_w = tf.Variable(tf.truncated_normal(w_shape,
stddev=0.1), name='weight')
fc_b = tf.Variable(tf.constant(0.1,
shape=[n]), name='bias')
fc_relu = tf.nn.relu(tf.matmul(flat, fc_w) + fc_b)
self.keep_prob = tf.placeholder(tf.float32)
drop = tf.nn.dropout(fc_relu, self.keep_prob)
#policy out
with tf.name_scope('action_prediction'):
a_w = tf.Variable(tf.truncated_normal([n, n_actions],
stddev=0.1), name='weight')
a_b = tf.Variable(tf.constant(0.1,
shape=[n_actions]), name='bias')
logits = tf.matmul(drop, a_w) + a_b
self.a_prob = tf.nn.softmax(logits)
a_logprob = tf.nn.log_softmax(logits)
a_pred = tf.reduce_sum(a_logprob * action_in, [1])
#exploration used in openai starter agent
logits_max = tf.reduce_max(logits, [1], keepdims=True)
dist = logits - logits_max
#simple exploration
#dist = a_logprob
action_random = tf.multinomial(dist, 1)
self.a_explore = tf.one_hot(action_random, n_actions)[0, :]
#value out
with tf.name_scope('value_prediction'):
v_w = tf.Variable(tf.truncated_normal([n, 1],
stddev=0.1), name='weight')
v_b = tf.Variable(tf.constant(0.1,
shape=[1]), name='bias')
self.v_pred = tf.reduce_sum(tf.matmul(drop, v_w) + v_b,
axis=1)
#loss and optimization
#functions from openai universe starter agent
#gradient = log (policy) * (v - v_pred) + beta * entropy
with tf.name_scope('loss'):
#value loss
v_loss = 0.5 * tf.reduce_sum(tf.square(
self.v_pred - self.reward_in))
#policy loss
a_loss = - tf.reduce_sum(a_pred * self.advantage_in)
#entropy
entropy = - tf.reduce_sum(self.a_prob * a_logprob)
#loss used for gradients
self.loss = a_loss + 0.5 * v_loss - entropy * 0.01
#calc and clip gradients for just local variables
with tf.name_scope('calc_gradients'):
#optimizer
learn_rate = 1e-4
self.optimizer = tf.train.AdamOptimizer(learn_rate)
#self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
#get local collection
self.variables = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
#compute returns a tuple list (grad, var)
grad, var = zip(*self.optimizer.compute_gradients(
self.loss, self.variables))
self.gradients, _ = tf.clip_by_global_norm(grad, 40.0)
with tf.name_scope('apply_gradients'):
#number of steps model has been trained
#note that batch input is considered 1 step
self.step_count = tf.Variable(0, name='step_count',
trainable=False)
self.inc_step = tf.assign_add(self.step_count, 1)
                #input gradients are the same shape as trainable vars
self.gradient_in = [tf.placeholder(tf.float32, x.shape)
for x in self.variables]
#zip with vars for optimizer
grads_vars = zip(self.gradient_in, self.variables)
self.optimize = self.optimizer.apply_gradients(
grads_vars, global_step=self.step_count)
with tf.name_scope('replace_vars'):
#create a placeholder for each trainable variable
self.vars_in = [tf.placeholder(tf.float32, x.shape)
for x in self.variables]
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.scope)
vars_list = zip(var, self.vars_in)
op_list = [v.assign(w) for v,w in vars_list]
self.put_vars = tf.group(*op_list)
#tensorboard visualization
with tf.name_scope('summaries'):
all_summaries = [
tf.summary.scalar('0_loss', self.loss),
tf.summary.scalar('1_v_loss', v_loss),
tf.summary.scalar('2_a_loss', a_loss),
tf.summary.scalar('3_entropy', entropy),
]
#tensorboard data
self.summaries = tf.summary.merge(all_summaries)
#separate summary dirs
self.writer = tf.summary.FileWriter('./logs/%s_data' % (
self.scope,), self.loss.graph, flush_secs=1)
#self.sess = tf.Session()
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=self.scope)
init_op = tf.variables_initializer(all_vars)
self.sess.run(init_op)
print ('[+] %s net initialized' % self.scope)
def process_batch(self, batch):
#FIXME: this is dumb, move to using an object to store batch
#split batch
imgs = []
actions = []
rewards = []
advantages = []
dones = []
for elem in batch:
img, action, reward, value, done = elem
imgs.append(img)
actions.append(action)
rewards.append(float(reward))
advantages.append(value) #calc advantages below
dones.append(int(done)) #convert from bool
#calc advantages
reward = 0.0
if not dones[-1]:
reward = advantages[-1]
for i in range(len(rewards) - 1, -1, -1): #reverse iterate
reward = rewards[i] + 0.99 * reward
rewards[i] = reward
advantages[i] = reward - advantages[i]
#convert to np arrays
imgs = np.asarray(imgs).astype(np.float32)
actions = np.asarray(actions).astype(np.int32)
rewards = np.asarray(rewards).astype(np.float32)
advantages = np.asarray(advantages).astype(np.float32)
dones = np.asarray(dones).astype(np.int32)
return imgs, actions, rewards, advantages, dones
def get_weights(self):
#need to convert tensors to numpy arrays
weights = [x.eval(session=self.sess) for x in self.variables]
return weights
def put_weights(self, weights):
self.sess.run([self.put_vars], feed_dict={
ph: v for ph,v in zip(self.vars_in, weights)})
def apply_gradients(self, gradients):
self.update_queue.put(gradients)
def update_loop(self, steps, print_interval=100):
#apply gradients in order given (fifo)
step = self.get_step()
while step < steps:
while not self.update_queue.empty():
#update msg
if step % print_interval == 0 or step == steps - 1:
print ('%s applying grad %s' % (self.scope, step))
gradients = self.update_queue.get()
self.sess.run([self.optimize], feed_dict={
ph: g for ph,g in zip(self.gradient_in, gradients)})
step = self.get_step()
def calc_gradients(self, batch):
imgs, actions, rewards, advantages, _ = self.process_batch(
batch)
loss, gradients, summary, step = self.sess.run([self.loss,
self.gradients, self.summaries, self.inc_step],
feed_dict={
self.state_in: imgs,
self.action_in: actions,
self.reward_in: rewards,
self.advantage_in: advantages,
self.keep_prob: 0.5})
#print ('%s step: %s' % (self.scope, step))
self.writer.add_summary(summary, step)
return gradients
def get_action_value(self, state, keep_prob=0.5, explore=True):
action_op = self.a_explore if explore else self.a_prob
        action, value = self.sess.run([action_op, self.v_pred],
feed_dict={self.state_in: [state],
self.keep_prob: keep_prob})
return np.argmax(action[0]), value[0]
def get_step(self):
return self.step_count.eval(session=self.sess)
def save(self):
saver = tf.train.Saver()
saver.save(self.sess, '%s/model' % self.path)
class A3C_Worker(object):
def __init__(self, coordinator, global_net, local_net, scope,
batchsize=20):
self.scope = scope
self.global_net = global_net
self.local_net = local_net
self.pull_weights()
self.batchsize = batchsize
def train(self, env, global_step_max=10):
batch = []
state = get_initial_state(env)
#t = 0
#episode = 0
while self.global_net.get_step() < global_step_max:
action, value = self.local_net.get_action_value(state)
next_state, reward, done = get_successor_state(env, action)
#reward update
value = 0 if done else value
#t += 1
#add example to batch
example = (state, action, reward, value, done)
batch.append(example)
#reset if terminal state, else continue
if done:
state = get_initial_state(env)
#print('episode %s finished in %s steps' % (episode, t))
#t = 0
#episode += 1
state = next_state
if len(batch) >= self.batchsize:
#push gradients to global_net
self.push_gradients(batch)
#pull gradients from global_net
self.pull_weights()
#reset experience batch
batch = []
print ('%s quit after training for %s' % (self.scope,
self.local_net.get_step()))
def push_gradients(self, batch):
gradients = self.local_net.calc_gradients(batch)
self.global_net.apply_gradients(gradients)
def pull_weights(self):
self.local_net.put_weights(self.global_net.get_weights())
def test(self, env, episodes=100, records=4, out_dir='./logs/records'):
#wrap env, record x episodes and eval scores
#func that indicates which episodes to record and write
vc = lambda n: n in [int(x) for x in np.linspace(episodes, 0,
records)]
#wrapper that records episodes
env = gym.wrappers.Monitor(env, directory=out_dir,
force=True, video_callable=vc)
#pull weights from global before testing
self.pull_weights()
#play for x episodes
stats = {
'steps': [],
'rewards': [],
}
for i in range(episodes):
steps = 0
done = False
rewards = 0
state = get_initial_state(env)
while not done:
action, _ = self.local_net.get_action_value(state,
keep_prob=1.0, explore=False)
state, reward, done = get_successor_state(env, action)
rewards += reward
steps += 1
stats['steps'].append(steps)
stats['rewards'].append(rewards)
#output some stats
print('\n%s tested for %s episodes' % (self.scope, episodes))
stats = pd.DataFrame(data=stats)
print(stats.describe().loc[['min', 'max', 'mean', 'std']])
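
# Minimal single-process wiring sketch (commented out; environment name, step counts and
# threading setup are illustrative; in practice each worker trains in its own thread
# while the global net's update_loop consumes the gradient queue):
#
#   sess = tf.Session()
#   env = gym.make('SpaceInvaders-v0')
#   global_net = A3C_Net(env, 'global', sess)
#   local_net = A3C_Net(env, 'worker_0', sess)
#   worker = A3C_Worker(None, global_net, local_net, 'worker_0', batchsize=20)
#
#   threading.Thread(target=worker.train, args=(env, 1000), daemon=True).start()
#   global_net.update_loop(1000)    # applies queued gradients until step 1000
#   worker.test(env, episodes=10)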
|
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.publishing.site_page import SitePage
from tests import test_client_credentials, test_team_site_url
ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
new_page = ctx.site_pages.pages.add()
new_page.save_draft(title="Latest News 456")
new_page.publish().execute_query()
pages = ctx.site_pages.pages.get().execute_query()
for page in pages: # type: SitePage
print(page.file_name)
|
# -*- coding: utf-8 -*-
"""
como.battery - the power connection
"""
# http://www.macrumors.com/2010/04/16/apple-tweaks-serial-number-format-with-new-macbook-pro/
import sys
import platform
from datetime import date, datetime
from clint.textui import puts
from paxo.util import is_osx, is_lin, is_win
from como.settings import LOCATION_CODES
# OS dependent imports
if is_osx or is_lin:
import subprocess
if is_win:
try:
import win32api
except ImportError:
print("The windows python api isn't installed. Please install pywin32.")
sys.exit(1)
try:
import wmi
except ImportError:
print("Make sure wmi is installed.")
sys.exit(1)
def get_age():
"""Get age of computer. Only OSX for now"""
if is_osx:
serial = {}
cmd = "ioreg -l | awk '/IOPlatformSerialNumber/ " + \
"{ split($0, line, \"\\\"\"); printf(\"%s\\n\", line[4]); }'"
serial['number'] = subprocess.check_output(
cmd, shell=True).translate(None, b'\n')
temp = serial['number']
if len(temp) == 11:
for code in LOCATION_CODES:
temp = temp.lstrip(code)
serial['year'] = int(temp[0])
serial['week'] = int(temp[1:3])
else:
serial['year'] = 0
serial['week'] = 0
return "N/A"
creation = str(date.today().year)[:-1] + str(
serial['year']) + str(serial['week']) + "1"
timedelta = datetime.utcnow() - datetime.strptime(creation, '%Y%W%w')
return timedelta.days / 30
else:
puts("no age on this operating system")
sys.exit(0)
def grep_filter(list, term):
for line in list:
if term in line:
yield line
def get_battery():
"""Gets all information associated with the battery from respective
system sources"""
battery = {}
if is_osx:
raw_osx_version, _, _ = platform.mac_ver()
osx_version = str('.'.join(raw_osx_version.split('.')[:2]))
# TODO: evaluate other sources like: system_profiler SPPowerDataType | grep "Cycle Count" | awk '{print $3}'
tmp = subprocess.check_output('ioreg -w0 -l | grep Capacity', shell=True)
bat = tmp.translate(None, b' "|').split(b'\n')
battery['serial'] = subprocess.check_output(
'ioreg -w0 -l | grep BatterySerialNumber',
shell=True
).translate(None, b'\n "|').lstrip(b'BatterySerialNumber=')
# battery['temp'] = int(subprocess.check_output(
# 'ioreg -w0 -l | grep Temperature',
# shell=True).translate(None, '\n "|').lstrip('Temperature='))
if osx_version == "10.15":
battery['maxcap'] = int(bat[3].lstrip(b'MaxCapacity='))
battery['curcap'] = int(bat[4].lstrip(b'CurrentCapacity='))
battery['legacy'] = bat[5].lstrip(b'LegacyBatteryInfo=')
battery['designcap'] = int(bat[7].lstrip(b'DesignCapacity='))
elif osx_version == "10.14":
battery['maxcap'] = int(bat[3].lstrip(b'MaxCapacity='))
battery['curcap'] = int(bat[4].lstrip(b'CurrentCapacity='))
battery['legacy'] = bat[5].lstrip(b'LegacyBatteryInfo=')
battery['designcap'] = int(bat[6].lstrip(b'DesignCapacity='))
else:
battery['maxcap'] = int(bat[1].lstrip(b'MaxCapacity='))
battery['curcap'] = int(bat[2].lstrip(b'CurrentCapacity='))
battery['legacy'] = bat[3].lstrip(b'LegacyBatteryInfo=')
battery['designcap'] = int(bat[4].lstrip(b'DesignCapacity='))
battery['cycles'] = int(
battery['legacy'].translate(
None, b'{}=').split(b',')[5].lstrip(b'CycleCount'))
battery['amperage'] = int(
battery['legacy'].translate(
None, b'{}=').split(b',')[0].lstrip(b'Amperage'))
if battery['amperage'] > 999999:
battery['amperage'] -= 18446744073709551615
battery['voltage'] = int(
battery['legacy'].translate(
None, b'{}=').split(b',')[4].lstrip(b'Voltage'))
elif is_lin:
battery['serial'] = subprocess.check_output(
"grep \"^serial number\" " +
"/proc/acpi/battery/BAT0/info | awk '{ print $3 }'",
shell=True
).translate(None, b'\n')
battery['state'] = subprocess.check_output(
"grep \"^charging state\" " +
"/proc/acpi/battery/BAT0/state | awk '{ print $3 }'",
shell=True
)
battery['maxcap'] = float(subprocess.check_output(
"grep \"^last full capacity\" " +
"/proc/acpi/battery/BAT0/info | awk '{ print $4 }'",
shell=True
))
battery['curcap'] = float(subprocess.check_output(
"grep \"^remaining capacity\" " +
"/proc/acpi/battery/BAT0/state | awk '{ print $3 }'",
shell=True
))
battery['designcap'] = float(subprocess.check_output(
"grep \"^design capacity:\" " +
"/proc/acpi/battery/BAT0/info | awk '{ print $3 }'",
shell=True
))
        battery['cycles'] = int(subprocess.check_output(
            "grep \"^cycle count\" /proc/acpi/battery/BAT0/info",
            shell=True
        ).lstrip(b"cycle count:").translate(None, b' '))
elif is_win:
# Get power status of the system using ctypes to call GetSystemPowerStatus
"""import ctypes
from ctypes import wintypes
class SYSTEM_POWER_STATUS(ctypes.Structure):
_fields_ = [
('ACLineStatus', wintypes.BYTE),
('BatteryFlag', wintypes.BYTE),
('BatteryLifePercent', wintypes.BYTE),
('Reserved1', wintypes.BYTE),
('BatteryLifeTime', wintypes.DWORD),
('BatteryFullLifeTime', wintypes.DWORD),
]
SYSTEM_POWER_STATUS_P = ctypes.POINTER(SYSTEM_POWER_STATUS)
GetSystemPowerStatus = ctypes.windll.kernel32.GetSystemPowerStatus
GetSystemPowerStatus.argtypes = [SYSTEM_POWER_STATUS_P]
GetSystemPowerStatus.restype = wintypes.BOOL
status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(ctypes.pointer(status)):
raise ctypes.WinError()
print 'ACLineStatus', status.ACLineStatus
print 'BatteryFlag', status.BatteryFlag
print 'BatteryLifePercent', status.BatteryLifePercent
print 'BatteryLifeTime', status.BatteryLifeTime
print 'BatteryFullLifeTime', status.BatteryFullLifeTime"""
#c = wmi.WMI()
t = wmi.WMI(moniker="//./root/wmi")
#b = c.Win32_Battery()[0]
battery['maxcap'] = t.ExecQuery("Select * from BatteryFullChargedCapacity")[0].FullChargedCapacity
batt = t.ExecQuery("Select * from BatteryStatus where Voltage > 0")[0]
battery['curcap'] = batt.RemainingCapacity
battery['voltage'] = batt.Voltage
battery['amperage'] = batt.ChargeRate
if batt.Charging:
battery['amperage'] = batt.ChargeRate
elif batt.Discharging:
battery['amperage'] = batt.DischargeRate
else:
battery['amperage'] = 0
battery['serial'] = batt.Name
return battery
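
# Minimal usage sketch (illustrative; which keys are present depends on the platform
# branch taken above):
#   info = get_battery()
#   print(info['curcap'], '/', info['maxcap'])
#   if is_osx:
#       print('machine age in months:', get_age())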
|
# -*- coding: utf-8 -*-
"""
This is the entry point of the Flask application.
"""
import subprocess
import unittest
import coverage
from flask_script import Manager
from app import LOGGER, create_app
# The logger should always be used instead of a print(). You need to import it from
# the app package. If you want to understand how to use it properly and why you
# should use it, check: http://bit.ly/2nqkupO
LOGGER.info('Server has started.')
# Defines which parts of the code to include and omit when calculating code coverage.
COV = coverage.coverage(branch=True,
include='app/*',
omit=['tests/*', 'app/website/*', '*__init__*'])
COV.start()
# Creates the Flask application object that we use to initialize things in the app.
app = create_app()
# Initializes the Manager object, which allows us to run terminal commands on the
# Flask application while it's running (using Flask-Script).
manager = Manager(app)
@manager.command
def cov():
"""
Runs the unit tests and generates a coverage report on success.
While the application is running, you can run the following command in a new terminal:
'docker-compose run --rm flask python manage.py cov' to run all the tests in the
'tests' directory. If all the tests pass, it will generate a coverage report.
:return int: 0 if all tests pass, 1 if not
"""
tests = unittest.TestLoader().discover('tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
COV.html_report()
COV.erase()
return 0
else:
return 1
@manager.command
def test():
"""
Runs the unit tests without generating a coverage report.
Enter 'docker-compose run --rm flask python manage.py test' to run all the tests in the
'tests' directory, with no coverage report.
:return int: 0 if all tests pass, 1 if not
"""
tests = unittest.TestLoader().discover('tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return 1
@manager.command
def test_one(test_file):
"""
Runs the unittest without generating a coverage report.
Enter 'docker-compose run --rm flask python manage.py test_one <NAME_OF_FILE>' to run only
one test file in the 'tests' directory. It provides no coverage report.
Example: 'docker-compose run --rm flask python manage.py test_one test_website'
Note that you do not need to put the extension of the test file.
:return int: 0 if all tests pass, 1 if not
"""
tests = unittest.TestLoader().discover('tests', pattern=test_file + '.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return 1
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc *.py app/'
yapf = 'yapf -r -i *.py app/'
print('Running {}'.format(isort))
subprocess.call(isort, shell=True)
print('Running {}'.format(yapf))
subprocess.call(yapf, shell=True)
# Starts the Flask app.
if __name__ == '__main__':
manager.run()
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import exceptions as lib_exc
from neutron.common import exceptions
from neutron import context
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.db.quota import driver
from neutron.tests.unit import testlib_api
class FakePlugin(base_plugin.NeutronDbPluginV2, driver.DbQuotaDriver):
"""A fake plugin class containing all DB methods."""
class TestResource(object):
"""Describe a test resource for quota checking."""
def __init__(self, name, default, fake_count=0):
self.name = name
self.quota = default
self.fake_count = fake_count
@property
def default(self):
return self.quota
def count(self, *args, **kwargs):
return self.fake_count
PROJECT = 'prj_test'
RESOURCE = 'res_test'
ALT_RESOURCE = 'res_test_meh'
class TestDbQuotaDriver(testlib_api.SqlTestCase):
def setUp(self):
super(TestDbQuotaDriver, self).setUp()
self.plugin = FakePlugin()
self.context = context.get_admin_context()
def test_create_quota_limit(self):
defaults = {RESOURCE: TestResource(RESOURCE, 4)}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
self.assertEqual(2, quotas[RESOURCE])
def test_update_quota_limit(self):
defaults = {RESOURCE: TestResource(RESOURCE, 4)}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3)
quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
self.assertEqual(3, quotas[RESOURCE])
def test_delete_tenant_quota_restores_default_limit(self):
defaults = {RESOURCE: TestResource(RESOURCE, 4)}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.plugin.delete_tenant_quota(self.context, PROJECT)
quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
self.assertEqual(4, quotas[RESOURCE])
def test_get_tenant_quotas(self):
user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT)
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
quotas = self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT)
self.assertEqual(2, quotas[RESOURCE])
def test_get_tenant_quotas_different_tenant(self):
user_ctx = context.Context(user_id=PROJECT,
tenant_id='another_project')
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
# It is appropriate to use assertFalse here as the expected return
# value is an empty dict (the defaults passed in the statement below
# after the request context)
self.assertFalse(self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT))
def test_get_all_quotas(self):
project_1 = 'prj_test_1'
project_2 = 'prj_test_2'
resource_1 = 'res_test_1'
resource_2 = 'res_test_2'
resources = {resource_1: TestResource(resource_1, 3),
resource_2: TestResource(resource_2, 5)}
self.plugin.update_quota_limit(self.context, project_1, resource_1, 7)
self.plugin.update_quota_limit(self.context, project_2, resource_2, 9)
quotas = self.plugin.get_all_quotas(self.context, resources)
# Expect two tenants' quotas
self.assertEqual(2, len(quotas))
# But not quotas for the same tenant twice
self.assertNotEqual(quotas[0]['tenant_id'], quotas[1]['tenant_id'])
# Check the expected limits. The quotas can be in any order.
for quota in quotas:
self.assertEqual(3, len(quota))
project = quota['tenant_id']
self.assertIn(project, (project_1, project_2))
if project == project_1:
expected_limit_r1 = 7
expected_limit_r2 = 5
if project == project_2:
expected_limit_r1 = 3
expected_limit_r2 = 9
self.assertEqual(expected_limit_r1, quota[resource_1])
self.assertEqual(expected_limit_r2, quota[resource_2])
def test_limit_check(self):
resources = {RESOURCE: TestResource(RESOURCE, 2)}
values = {RESOURCE: 1}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.plugin.limit_check(self.context, PROJECT, resources, values)
def test_limit_check_over_quota(self):
resources = {RESOURCE: TestResource(RESOURCE, 2)}
values = {RESOURCE: 3}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.assertRaises(lib_exc.OverQuota, self.plugin.limit_check,
context.get_admin_context(), PROJECT, resources,
values)
def test_limit_check_equals_to_quota(self):
resources = {RESOURCE: TestResource(RESOURCE, 2)}
values = {RESOURCE: 2}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.plugin.limit_check(self.context, PROJECT, resources, values)
def test_limit_check_value_lower_than_zero(self):
resources = {RESOURCE: TestResource(RESOURCE, 2)}
values = {RESOURCE: -1}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.assertRaises(exceptions.InvalidQuotaValue,
self.plugin.limit_check, context.get_admin_context(),
PROJECT, resources, values)
def _test_make_reservation_success(self, quota_driver,
resource_name, deltas):
resources = {resource_name: TestResource(resource_name, 2)}
self.plugin.update_quota_limit(self.context, PROJECT, resource_name, 2)
reservation = quota_driver.make_reservation(
self.context,
self.context.tenant_id,
resources,
deltas,
self.plugin)
self.assertIn(resource_name, reservation.deltas)
self.assertEqual(deltas[resource_name],
reservation.deltas[resource_name])
self.assertEqual(self.context.tenant_id,
reservation.tenant_id)
def test_make_reservation_single_resource(self):
quota_driver = driver.DbQuotaDriver()
self._test_make_reservation_success(
quota_driver, RESOURCE, {RESOURCE: 1})
def test_make_reservation_fill_quota(self):
quota_driver = driver.DbQuotaDriver()
self._test_make_reservation_success(
quota_driver, RESOURCE, {RESOURCE: 2})
def test_make_reservation_multiple_resources(self):
quota_driver = driver.DbQuotaDriver()
resources = {RESOURCE: TestResource(RESOURCE, 2),
ALT_RESOURCE: TestResource(ALT_RESOURCE, 2)}
deltas = {RESOURCE: 1, ALT_RESOURCE: 2}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.plugin.update_quota_limit(self.context, PROJECT, ALT_RESOURCE, 2)
reservation = quota_driver.make_reservation(
self.context,
self.context.tenant_id,
resources,
deltas,
self.plugin)
self.assertIn(RESOURCE, reservation.deltas)
self.assertIn(ALT_RESOURCE, reservation.deltas)
self.assertEqual(1, reservation.deltas[RESOURCE])
self.assertEqual(2, reservation.deltas[ALT_RESOURCE])
self.assertEqual(self.context.tenant_id,
reservation.tenant_id)
def test_make_reservation_over_quota_fails(self):
quota_driver = driver.DbQuotaDriver()
resources = {RESOURCE: TestResource(RESOURCE, 2,
fake_count=2)}
deltas = {RESOURCE: 1}
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
self.assertRaises(lib_exc.OverQuota,
quota_driver.make_reservation,
self.context,
self.context.tenant_id,
resources,
deltas,
self.plugin)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from ProcessOptimizer.space import Integer, Categorical
from ProcessOptimizer import gp_minimize, bokeh_plot
# For reproducibility
np.random.seed(123)
plt.set_cmap("viridis")
SPACE = [
Integer(1, 20, name='max_depth'),
Integer(2, 100, name='min_samples_split'),
Integer(5, 30, name='min_samples_leaf'),
Integer(1, 30, name='max_features'),
Categorical(list('abc'), name='dummy'),
Categorical(['gini', 'entropy'], name='criterion'),
Categorical(list('def'), name='dummy'),
]
def objective(params):
clf = DecisionTreeClassifier(
**{dim.name: val for dim,
val in zip(SPACE, params) if dim.name != 'dummy'})
    return -np.mean(cross_val_score(clf, *load_breast_cancer(return_X_y=True)))
result = gp_minimize(objective, SPACE, n_calls=20)
bokeh_plot.start(result)
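# Hypothetical follow-up (assuming the scikit-optimize-style result object returned by
# gp_minimize): the best parameters found are in result.x and the best objective value
# in result.fun, e.g.
#     print(result.x, result.fun)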
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `sharkcoin-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
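# Minimal usage sketch (names are illustrative; `node` is assumed to be an
# AuthServiceProxy instance created elsewhere by the test framework):
#
#     logfile = get_filename("/tmp/coverage", n_node=0)
#     wrapped = AuthServiceProxyWrapper(node, coverage_logfile=logfile)
#     wrapped.getblockcount()              # the RPC name is appended to the logfile
#     write_all_rpc_commands("/tmp/coverage", node)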
|
# -*- coding: UTF-8 -*-
"""
Probability distribution functions
"""
import numpy as np
import scipy.stats as st
import matplotlib as mpl
import matplotlib.pyplot as plt
# Binomial distribution
def binomial_distribution():
n = 10
p = 0.3
k = np.arange(0, 21)
binomial = st.binom.pmf(k=k, n=n, p=p)
plt.plot(k, binomial, 'o-')
plt.title('Binomial: n=%i, p=%.2f' % (n, p))
plt.show()
# Generate random variates from a binomial distribution
def binomial_distribution_rvs():
data = st.binom.rvs(n=10, p=0.3, size=10000)
plt.hist(data, 20, facecolor='g', alpha=0.75)
plt.show()
# Poisson distribution
def poisson_distribution():
rate = 2
n = np.arange(0, 11)
poisson = st.poisson.pmf(n, rate)
plt.plot(n, poisson, 'o-')
plt.title('Poisson: rate=%i' % rate)
plt.show()
# Generate random variates from a Poisson distribution
def poisson_distribution_rvs():
data = st.poisson.rvs(mu=2, loc=0, size=10000)
plt.hist(data, 20, facecolor='g', alpha=0.75)
plt.show()
# Normal distribution
def normal_distribution():
    mu = 0  # mean
    sigma = 1  # standard deviation
    x = np.arange(-5, 5, 0.1)
    normal = st.norm.pdf(x, mu, sigma)
    plt.plot(x, normal)
    plt.title(r'Normal: $\mu$=%.1f, $\sigma$=%.1f' % (mu, sigma))
    plt.grid(True)
    plt.show()
# Beta distribution
def beta_distribution():
a = 0.5
b = 0.5
x = np.arange(0.01, 1, 0.01)
beta = st.beta.pdf(x, a, b)
plt.plot(x, beta)
plt.title('Beta: a=%.1f, b=%.1f' % (a, b))
plt.show()
# Exponential distribution
def exponential_distribution():
    lam = 0.5
    x = np.arange(0, 15, 0.1)
    exponential = lam * np.exp(-lam * x)
    plt.plot(x, exponential)
    plt.title(r'Exponential: $\lambda$=%.2f' % lam)
    plt.show()
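# Note: the density computed by hand above equals st.expon.pdf(x, scale=1 / lam),
# since scipy parameterises the exponential distribution by its scale (1 / rate).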
# binomial_distribution()
# binomial_distribution_rvs()
# poisson_distribution()
# poisson_distribution_rvs()
# normal_distribution()
# beta_distribution()
exponential_distribution()
|
import os
import time
from selenium import webdriver
BROWSER_WIDTH = 1024
BROWSER_HEIGHT = 800
USERNAME = os.environ.get("APP_USERNAME")
PASSWORD = os.environ.get("APP_PASSWORD")
BOARD_ID = os.environ.get("APP_BOARD_ID")
DRIVER_PATH = os.environ.get("APP_WEBDRIVER_PATH", "geckodriver")
HEADLESS = os.environ.get("APP_ENABLE_HEADLESS", "true").lower() in ("1", "true", "yes")
class SiteCapture:
def __init__(self):
firefox_options = webdriver.FirefoxOptions()
if HEADLESS:
firefox_options.add_argument("-headless")
self.driver = webdriver.Firefox(
executable_path=DRIVER_PATH,
options=firefox_options,
)
        self.driver.set_window_size(BROWSER_WIDTH, BROWSER_HEIGHT)
def step_login(self):
self.driver.get("https://trello.com/login")
self.driver.find_element_by_css_selector("#user").send_keys(USERNAME)
self.driver.find_element_by_css_selector("#password").send_keys(PASSWORD)
self.driver.find_element_by_css_selector("#login").click()
def step_snap(self):
        # implicitly_wait is not enough to reach the board page, so wait explicitly
time.sleep(3)
self.driver.get(f"https://trello.com/b/{BOARD_ID}/")
self.driver.get_screenshot_as_file('board.png')
def close(self):
self.driver.close()
def main():
site_capture = SiteCapture()
try:
site_capture.step_login()
site_capture.step_snap()
finally:
site_capture.close()
if __name__ == "__main__":
main()
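# Example invocation (all values are placeholders; use whatever file name this module is
# saved as):
#   APP_USERNAME=user@example.com APP_PASSWORD=secret APP_BOARD_ID=abc123 python site_capture.py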
|
import hashlib
import base64
def md5_file(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
    digest = hash_md5.digest()
    return base64.b64encode(digest).decode('utf-8')
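# Example (hypothetical path): md5_file("/tmp/example.bin") returns the base64-encoded MD5
# digest of the file contents, e.g. "1B2M2Y8AsgTpgAmY7PhCfg==" for an empty file.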
|
# -*- coding: utf-8 -*-
"""The ZIP directory implementation."""
from dfvfs.path import zip_path_spec
from dfvfs.vfs import directory
class ZIPDirectory(directory.Directory):
"""File system directory that uses zipfile."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
ZipPathSpec: a path specification.
"""
location = getattr(self.path_spec, 'location', None)
if location and location.startswith(self._file_system.PATH_SEPARATOR):
# The zip_info filename does not have the leading path separator
# as the location string does.
zip_path = location[1:]
# Set of top level sub directories that have been yielded.
processed_directories = set()
zip_file = self._file_system.GetZipFile()
for zip_info in zip_file.infolist():
path = getattr(zip_info, 'filename', None)
if path is not None and not isinstance(path, str):
try:
path = path.decode(self._file_system.encoding)
except UnicodeDecodeError:
path = None
if not path or not path.startswith(zip_path):
continue
# Ignore the directory itself.
if path == zip_path:
continue
path_segment, suffix = self._file_system.GetPathSegmentAndSuffix(
zip_path, path)
if not path_segment:
continue
        # Sometimes the ZIP file lacks directory entries, therefore we
        # provide virtual ones.
if suffix:
path_spec_location = self._file_system.JoinPath([
location, path_segment])
is_directory = True
else:
path_spec_location = self._file_system.JoinPath([path])
is_directory = path.endswith('/')
if is_directory:
if path_spec_location in processed_directories:
continue
processed_directories.add(path_spec_location)
# Restore / at end path to indicate a directory.
path_spec_location += self._file_system.PATH_SEPARATOR
yield zip_path_spec.ZipPathSpec(
location=path_spec_location, parent=self.path_spec.parent)
|
from boto3 import Session
from moto.core import BaseBackend
from moto.core.utils import unix_time_millis
from .exceptions import (
ResourceNotFoundException,
ResourceAlreadyExistsException,
InvalidParameterException,
LimitExceededException,
)
class LogEvent:
_event_id = 0
def __init__(self, ingestion_time, log_event):
self.ingestionTime = ingestion_time
self.timestamp = log_event["timestamp"]
self.message = log_event["message"]
self.eventId = self.__class__._event_id
self.__class__._event_id += 1
def to_filter_dict(self):
return {
"eventId": str(self.eventId),
"ingestionTime": self.ingestionTime,
# "logStreamName":
"message": self.message,
"timestamp": self.timestamp,
}
def to_response_dict(self):
return {
"ingestionTime": self.ingestionTime,
"message": self.message,
"timestamp": self.timestamp,
}
class LogStream:
_log_ids = 0
def __init__(self, region, log_group, name):
self.region = region
self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
region=region,
id=self.__class__._log_ids,
log_group=log_group,
log_stream=name,
)
self.creationTime = int(unix_time_millis())
self.firstEventTimestamp = None
self.lastEventTimestamp = None
self.lastIngestionTime = None
self.logStreamName = name
self.storedBytes = 0
self.uploadSequenceToken = (
            0  # presumably the token needed for sequenceToken by put_log_events
)
self.events = []
self.destination_arn = None
self.filter_name = None
self.__class__._log_ids += 1
def _update(self):
# events can be empty when stream is described soon after creation
self.firstEventTimestamp = (
min([x.timestamp for x in self.events]) if self.events else None
)
self.lastEventTimestamp = (
max([x.timestamp for x in self.events]) if self.events else None
)
def to_describe_dict(self):
# Compute start and end times
self._update()
res = {
"arn": self.arn,
"creationTime": self.creationTime,
"logStreamName": self.logStreamName,
"storedBytes": self.storedBytes,
}
if self.events:
rest = {
"firstEventTimestamp": self.firstEventTimestamp,
"lastEventTimestamp": self.lastEventTimestamp,
"lastIngestionTime": self.lastIngestionTime,
"uploadSequenceToken": str(self.uploadSequenceToken),
}
res.update(rest)
return res
def put_log_events(
self, log_group_name, log_stream_name, log_events, sequence_token
):
# TODO: ensure sequence_token
# TODO: to be thread safe this would need a lock
self.lastIngestionTime = int(unix_time_millis())
# TODO: make this match AWS if possible
self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
events = [
LogEvent(self.lastIngestionTime, log_event) for log_event in log_events
]
self.events += events
self.uploadSequenceToken += 1
if self.destination_arn and self.destination_arn.split(":")[2] == "lambda":
from moto.awslambda import lambda_backends # due to circular dependency
lambda_log_events = [
{
"id": event.eventId,
"timestamp": event.timestamp,
"message": event.message,
}
for event in events
]
lambda_backends[self.region].send_log_event(
self.destination_arn,
self.filter_name,
log_group_name,
log_stream_name,
lambda_log_events,
)
return "{:056d}".format(self.uploadSequenceToken)
def get_log_events(
self,
log_group_name,
log_stream_name,
start_time,
end_time,
limit,
next_token,
start_from_head,
):
def filter_func(event):
if start_time and event.timestamp < start_time:
return False
if end_time and event.timestamp > end_time:
return False
return True
def get_index_and_direction_from_token(token):
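            # Tokens produced below look like 'f/<56-digit index>' or 'b/<56-digit index>':
            # the first character is the paging direction (forward/backward) and the digits
            # after the '/' are the index of the last event returned.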
if token is not None:
try:
return token[0], int(token[2:])
except Exception:
raise InvalidParameterException(
"The specified nextToken is invalid."
)
return None, 0
events = sorted(
filter(filter_func, self.events), key=lambda event: event.timestamp
)
direction, index = get_index_and_direction_from_token(next_token)
limit_index = limit - 1
final_index = len(events) - 1
if direction is None:
if start_from_head:
start_index = 0
end_index = start_index + limit_index
else:
end_index = final_index
start_index = end_index - limit_index
elif direction == "f":
start_index = index + 1
end_index = start_index + limit_index
elif direction == "b":
end_index = index - 1
start_index = end_index - limit_index
else:
raise InvalidParameterException("The specified nextToken is invalid.")
if start_index < 0:
start_index = 0
elif start_index > final_index:
return (
[],
"b/{:056d}".format(final_index),
"f/{:056d}".format(final_index),
)
if end_index > final_index:
end_index = final_index
elif end_index < 0:
return ([], "b/{:056d}".format(0), "f/{:056d}".format(0))
events_page = [
event.to_response_dict() for event in events[start_index : end_index + 1]
]
return (
events_page,
"b/{:056d}".format(start_index),
"f/{:056d}".format(end_index),
)
def filter_log_events(
self,
log_group_name,
log_stream_names,
start_time,
end_time,
limit,
next_token,
filter_pattern,
interleaved,
):
if filter_pattern:
raise NotImplementedError("filter_pattern is not yet implemented")
def filter_func(event):
if start_time and event.timestamp < start_time:
return False
if end_time and event.timestamp > end_time:
return False
return True
events = []
for event in sorted(
filter(filter_func, self.events), key=lambda x: x.timestamp
):
event_obj = event.to_filter_dict()
event_obj["logStreamName"] = self.logStreamName
events.append(event_obj)
return events
class LogGroup:
def __init__(self, region, name, tags, **kwargs):
self.name = name
self.region = region
self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
region=region, log_group=name
)
self.creationTime = int(unix_time_millis())
self.tags = tags
self.streams = dict() # {name: LogStream}
self.retention_in_days = kwargs.get(
"RetentionInDays"
) # AWS defaults to Never Expire for log group retention
self.subscription_filters = []
def create_log_stream(self, log_stream_name):
if log_stream_name in self.streams:
raise ResourceAlreadyExistsException()
self.streams[log_stream_name] = LogStream(
self.region, self.name, log_stream_name
)
def delete_log_stream(self, log_stream_name):
if log_stream_name not in self.streams:
raise ResourceNotFoundException()
del self.streams[log_stream_name]
def describe_log_streams(
self,
descending,
limit,
log_group_name,
log_stream_name_prefix,
next_token,
order_by,
):
        # The response only includes logStreamName, creationTime, arn and storedBytes when no events are stored.
log_streams = [
(name, stream.to_describe_dict())
for name, stream in self.streams.items()
if name.startswith(log_stream_name_prefix)
]
def sorter(item):
return (
item[0]
if order_by == "logStreamName"
else item[1].get("lastEventTimestamp", 0)
)
log_streams = sorted(log_streams, key=sorter, reverse=descending)
first_index = 0
if next_token:
try:
group, stream = next_token.split("@")
if group != log_group_name:
raise ValueError()
first_index = (
next(
index
for (index, e) in enumerate(log_streams)
if e[1]["logStreamName"] == stream
)
+ 1
)
except (ValueError, StopIteration):
first_index = 0
log_streams = []
last_index = first_index + limit
if last_index > len(log_streams):
last_index = len(log_streams)
log_streams_page = [x[1] for x in log_streams[first_index:last_index]]
new_token = None
if log_streams_page and last_index < len(log_streams):
new_token = "{}@{}".format(
log_group_name, log_streams_page[-1]["logStreamName"]
)
return log_streams_page, new_token
def put_log_events(
self, log_group_name, log_stream_name, log_events, sequence_token
):
if log_stream_name not in self.streams:
raise ResourceNotFoundException()
stream = self.streams[log_stream_name]
return stream.put_log_events(
log_group_name, log_stream_name, log_events, sequence_token
)
def get_log_events(
self,
log_group_name,
log_stream_name,
start_time,
end_time,
limit,
next_token,
start_from_head,
):
if log_stream_name not in self.streams:
raise ResourceNotFoundException()
stream = self.streams[log_stream_name]
return stream.get_log_events(
log_group_name,
log_stream_name,
start_time,
end_time,
limit,
next_token,
start_from_head,
)
def filter_log_events(
self,
log_group_name,
log_stream_names,
start_time,
end_time,
limit,
next_token,
filter_pattern,
interleaved,
):
streams = [
stream
for name, stream in self.streams.items()
if not log_stream_names or name in log_stream_names
]
events = []
for stream in streams:
events += stream.filter_log_events(
log_group_name,
log_stream_names,
start_time,
end_time,
limit,
next_token,
filter_pattern,
interleaved,
)
if interleaved:
events = sorted(events, key=lambda event: event["timestamp"])
first_index = 0
if next_token:
try:
group, stream, event_id = next_token.split("/")
if group != log_group_name:
raise ValueError()
first_index = (
next(
index
for (index, e) in enumerate(events)
if e["logStreamName"] == stream and e["eventId"] == event_id
)
+ 1
)
except (ValueError, StopIteration):
first_index = 0
# AWS returns an empty list if it receives an invalid token.
events = []
last_index = first_index + limit
if last_index > len(events):
last_index = len(events)
events_page = events[first_index:last_index]
next_token = None
if events_page and last_index < len(events):
last_event = events_page[-1]
next_token = "{}/{}/{}".format(
log_group_name, last_event["logStreamName"], last_event["eventId"]
)
searched_streams = [
{"logStreamName": stream.logStreamName, "searchedCompletely": True}
for stream in streams
]
return events_page, next_token, searched_streams
def to_describe_dict(self):
log_group = {
"arn": self.arn,
"creationTime": self.creationTime,
"logGroupName": self.name,
"metricFilterCount": 0,
"storedBytes": sum(s.storedBytes for s in self.streams.values()),
}
# AWS only returns retentionInDays if a value is set for the log group (ie. not Never Expire)
if self.retention_in_days:
log_group["retentionInDays"] = self.retention_in_days
return log_group
def set_retention_policy(self, retention_in_days):
self.retention_in_days = retention_in_days
def list_tags(self):
return self.tags if self.tags else {}
def tag(self, tags):
if self.tags:
self.tags.update(tags)
else:
self.tags = tags
def untag(self, tags_to_remove):
if self.tags:
self.tags = {
k: v for (k, v) in self.tags.items() if k not in tags_to_remove
}
def describe_subscription_filters(self):
return self.subscription_filters
def put_subscription_filter(
self, filter_name, filter_pattern, destination_arn, role_arn
):
creation_time = int(unix_time_millis())
# only one subscription filter can be associated with a log group
if self.subscription_filters:
if self.subscription_filters[0]["filterName"] == filter_name:
creation_time = self.subscription_filters[0]["creationTime"]
else:
raise LimitExceededException
for stream in self.streams.values():
stream.destination_arn = destination_arn
stream.filter_name = filter_name
self.subscription_filters = [
{
"filterName": filter_name,
"logGroupName": self.name,
"filterPattern": filter_pattern,
"destinationArn": destination_arn,
"roleArn": role_arn,
"distribution": "ByLogStream",
"creationTime": creation_time,
}
]
def delete_subscription_filter(self, filter_name):
if (
not self.subscription_filters
or self.subscription_filters[0]["filterName"] != filter_name
):
raise ResourceNotFoundException(
"The specified subscription filter does not exist."
)
self.subscription_filters = []
class LogsBackend(BaseBackend):
def __init__(self, region_name):
self.region_name = region_name
self.groups = dict() # { logGroupName: LogGroup}
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_log_group(self, log_group_name, tags, **kwargs):
if log_group_name in self.groups:
raise ResourceAlreadyExistsException()
self.groups[log_group_name] = LogGroup(
self.region_name, log_group_name, tags, **kwargs
)
return self.groups[log_group_name]
def ensure_log_group(self, log_group_name, tags):
if log_group_name in self.groups:
return
self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
def delete_log_group(self, log_group_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
del self.groups[log_group_name]
def describe_log_groups(self, limit, log_group_name_prefix, next_token):
if log_group_name_prefix is None:
log_group_name_prefix = ""
groups = [
group.to_describe_dict()
for name, group in self.groups.items()
if name.startswith(log_group_name_prefix)
]
groups = sorted(groups, key=lambda x: x["logGroupName"])
index_start = 0
if next_token:
try:
index_start = (
next(
index
for (index, d) in enumerate(groups)
if d["logGroupName"] == next_token
)
+ 1
)
except StopIteration:
index_start = 0
# AWS returns an empty list if it receives an invalid token.
groups = []
index_end = index_start + limit
if index_end > len(groups):
index_end = len(groups)
groups_page = groups[index_start:index_end]
next_token = None
if groups_page and index_end < len(groups):
next_token = groups_page[-1]["logGroupName"]
return groups_page, next_token
def create_log_stream(self, log_group_name, log_stream_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.create_log_stream(log_stream_name)
def delete_log_stream(self, log_group_name, log_stream_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.delete_log_stream(log_stream_name)
def describe_log_streams(
self,
descending,
limit,
log_group_name,
log_stream_name_prefix,
next_token,
order_by,
):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.describe_log_streams(
descending,
limit,
log_group_name,
log_stream_name_prefix,
next_token,
order_by,
)
def put_log_events(
self, log_group_name, log_stream_name, log_events, sequence_token
):
# TODO: add support for sequence_tokens
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.put_log_events(
log_group_name, log_stream_name, log_events, sequence_token
)
def get_log_events(
self,
log_group_name,
log_stream_name,
start_time,
end_time,
limit,
next_token,
start_from_head,
):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.get_log_events(
log_group_name,
log_stream_name,
start_time,
end_time,
limit,
next_token,
start_from_head,
)
def filter_log_events(
self,
log_group_name,
log_stream_names,
start_time,
end_time,
limit,
next_token,
filter_pattern,
interleaved,
):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.filter_log_events(
log_group_name,
log_stream_names,
start_time,
end_time,
limit,
next_token,
filter_pattern,
interleaved,
)
def put_retention_policy(self, log_group_name, retention_in_days):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.set_retention_policy(retention_in_days)
def delete_retention_policy(self, log_group_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.set_retention_policy(None)
def list_tags_log_group(self, log_group_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.list_tags()
def tag_log_group(self, log_group_name, tags):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
log_group.tag(tags)
def untag_log_group(self, log_group_name, tags):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
log_group.untag(tags)
def describe_subscription_filters(self, log_group_name):
log_group = self.groups.get(log_group_name)
if not log_group:
raise ResourceNotFoundException()
return log_group.describe_subscription_filters()
def put_subscription_filter(
self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn
):
# TODO: support other destinations like Kinesis stream
from moto.awslambda import lambda_backends # due to circular dependency
log_group = self.groups.get(log_group_name)
if not log_group:
raise ResourceNotFoundException()
lambda_func = lambda_backends[self.region_name].get_function(destination_arn)
# no specific permission check implemented
if not lambda_func:
raise InvalidParameterException(
"Could not execute the lambda function. "
"Make sure you have given CloudWatch Logs permission to execute your function."
)
log_group.put_subscription_filter(
filter_name, filter_pattern, destination_arn, role_arn
)
def delete_subscription_filter(self, log_group_name, filter_name):
log_group = self.groups.get(log_group_name)
if not log_group:
raise ResourceNotFoundException()
log_group.delete_subscription_filter(filter_name)
logs_backends = {}
for region in Session().get_available_regions("logs"):
logs_backends[region] = LogsBackend(region)
for region in Session().get_available_regions("logs", partition_name="aws-us-gov"):
logs_backends[region] = LogsBackend(region)
for region in Session().get_available_regions("logs", partition_name="aws-cn"):
logs_backends[region] = LogsBackend(region)
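# Minimal usage sketch (region and names are illustrative):
#     backend = logs_backends["us-east-1"]
#     backend.create_log_group("my-group", tags={})
#     backend.create_log_stream("my-group", "my-stream")
#     token = backend.put_log_events(
#         "my-group", "my-stream",
#         [{"timestamp": 0, "message": "hello"}], sequence_token=None)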
|
# -*- coding: utf-8 -*-
'''
Tests for salt.utils.data
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.data
import salt.utils.stringutils
from salt.utils.odict import OrderedDict
from tests.support.unit import TestCase, skipIf, LOREM_IPSUM
from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON
from salt.ext.six.moves import builtins # pylint: disable=import-error,redefined-builtin
from salt.ext import six
log = logging.getLogger(__name__)
_b = lambda x: x.encode('utf-8')
_s = lambda x: salt.utils.stringutils.to_str(x, normalize=True)
# Some randomized data that will not decode
BYTES = b'1\x814\x10'
# This is an example of a unicode string with й constructed using two separate
# code points. Do not modify it.
EGGS = '\u044f\u0438\u0306\u0446\u0430'
class DataTestCase(TestCase):
test_data = [
'unicode_str',
_b('питон'),
123,
456.789,
True,
False,
None,
EGGS,
BYTES,
[123, 456.789, _b('спам'), True, False, None, EGGS, BYTES],
(987, 654.321, _b('яйца'), EGGS, None, (True, EGGS, BYTES)),
{_b('str_key'): _b('str_val'),
None: True,
123: 456.789,
EGGS: BYTES,
_b('subdict'): {'unicode_key': EGGS,
_b('tuple'): (123, 'hello', _b('world'), True, EGGS, BYTES),
_b('list'): [456, _b('спам'), False, EGGS, BYTES]}},
OrderedDict([(_b('foo'), 'bar'), (123, 456), (EGGS, BYTES)])
]
def test_sorted_ignorecase(self):
test_list = ['foo', 'Foo', 'bar', 'Bar']
expected_list = ['bar', 'Bar', 'foo', 'Foo']
self.assertEqual(
salt.utils.data.sorted_ignorecase(test_list), expected_list)
def test_mysql_to_dict(self):
test_mysql_output = ['+----+------+-----------+------+---------+------+-------+------------------+',
'| Id | User | Host | db | Command | Time | State | Info |',
'+----+------+-----------+------+---------+------+-------+------------------+',
'| 7 | root | localhost | NULL | Query | 0 | init | show processlist |',
'+----+------+-----------+------+---------+------+-------+------------------+']
ret = salt.utils.data.mysql_to_dict(test_mysql_output, 'Info')
expected_dict = {
'show processlist': {'Info': 'show processlist', 'db': 'NULL', 'State': 'init', 'Host': 'localhost',
'Command': 'Query', 'User': 'root', 'Time': 0, 'Id': 7}}
self.assertDictEqual(ret, expected_dict)
def test_subdict_match(self):
test_two_level_dict = {'foo': {'bar': 'baz'}}
test_two_level_comb_dict = {'foo': {'bar': 'baz:woz'}}
test_two_level_dict_and_list = {
'abc': ['def', 'ghi', {'lorem': {'ipsum': [{'dolor': 'sit'}]}}],
}
test_three_level_dict = {'a': {'b': {'c': 'v'}}}
self.assertTrue(
salt.utils.data.subdict_match(
test_two_level_dict, 'foo:bar:baz'
)
)
# In test_two_level_comb_dict, 'foo:bar' corresponds to 'baz:woz', not
# 'baz'. This match should return False.
self.assertFalse(
salt.utils.data.subdict_match(
test_two_level_comb_dict, 'foo:bar:baz'
)
)
# This tests matching with the delimiter in the value part (in other
# words, that the path 'foo:bar' corresponds to the string 'baz:woz').
self.assertTrue(
salt.utils.data.subdict_match(
test_two_level_comb_dict, 'foo:bar:baz:woz'
)
)
        # This would match if test_two_level_comb_dict['foo']['bar'] were equal
        # to 'baz:woz:wiz', or if there were deeper nesting. But it is not,
        # so this should return False.
self.assertFalse(
salt.utils.data.subdict_match(
test_two_level_comb_dict, 'foo:bar:baz:woz:wiz'
)
)
# This tests for cases when a key path corresponds to a list. The
# value part 'ghi' should be successfully matched as it is a member of
        # the list corresponding to key path 'abc'. It somewhat duplicates a
        # test within test_traverse_dict_and_list, but
        # salt.utils.data.subdict_match() does more than just invoke
        # salt.utils.data.traverse_dict_and_list(), so this particular
        # assertion is a sanity check.
self.assertTrue(
salt.utils.data.subdict_match(
test_two_level_dict_and_list, 'abc:ghi'
)
)
# This tests the use case of a dict embedded in a list, embedded in a
# list, embedded in a dict. This is a rather absurd case, but it
# confirms that match recursion works properly.
self.assertTrue(
salt.utils.data.subdict_match(
test_two_level_dict_and_list, 'abc:lorem:ipsum:dolor:sit'
)
)
# Test four level dict match for reference
self.assertTrue(
salt.utils.data.subdict_match(
test_three_level_dict, 'a:b:c:v'
)
)
self.assertFalse(
# Test regression in 2015.8 where 'a:c:v' would match 'a:b:c:v'
salt.utils.data.subdict_match(
test_three_level_dict, 'a:c:v'
)
)
# Test wildcard match
self.assertTrue(
salt.utils.data.subdict_match(
test_three_level_dict, 'a:*:c:v'
)
)
def test_subdict_match_with_wildcards(self):
'''
Tests subdict matching when wildcards are used in the expression
'''
data = {
'a': {
'b': {
'ç': 'd',
'é': ['eff', 'gee', '8ch'],
'ĩ': {'j': 'k'}
}
}
}
assert salt.utils.data.subdict_match(data, '*:*:*:*')
assert salt.utils.data.subdict_match(data, 'a:*:*:*')
assert salt.utils.data.subdict_match(data, 'a:b:*:*')
assert salt.utils.data.subdict_match(data, 'a:b:ç:*')
assert salt.utils.data.subdict_match(data, 'a:b:*:d')
assert salt.utils.data.subdict_match(data, 'a:*:ç:d')
assert salt.utils.data.subdict_match(data, '*:b:ç:d')
assert salt.utils.data.subdict_match(data, '*:*:ç:d')
assert salt.utils.data.subdict_match(data, '*:*:*:d')
assert salt.utils.data.subdict_match(data, 'a:*:*:d')
assert salt.utils.data.subdict_match(data, 'a:b:*:ef*')
assert salt.utils.data.subdict_match(data, 'a:b:*:g*')
assert salt.utils.data.subdict_match(data, 'a:b:*:j:*')
assert salt.utils.data.subdict_match(data, 'a:b:*:j:k')
assert salt.utils.data.subdict_match(data, 'a:b:*:*:k')
assert salt.utils.data.subdict_match(data, 'a:b:*:*:*')
def test_traverse_dict(self):
test_two_level_dict = {'foo': {'bar': 'baz'}}
self.assertDictEqual(
{'not_found': 'nope'},
salt.utils.data.traverse_dict(
test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'}
)
)
self.assertEqual(
'baz',
salt.utils.data.traverse_dict(
test_two_level_dict, 'foo:bar', {'not_found': 'not_found'}
)
)
def test_traverse_dict_and_list(self):
test_two_level_dict = {'foo': {'bar': 'baz'}}
test_two_level_dict_and_list = {
'foo': ['bar', 'baz', {'lorem': {'ipsum': [{'dolor': 'sit'}]}}]
}
# Check traversing too far: salt.utils.data.traverse_dict_and_list() returns
# the value corresponding to a given key path, and baz is a value
# corresponding to the key path foo:bar.
self.assertDictEqual(
{'not_found': 'nope'},
salt.utils.data.traverse_dict_and_list(
test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'}
)
)
# Now check to ensure that foo:bar corresponds to baz
self.assertEqual(
'baz',
salt.utils.data.traverse_dict_and_list(
test_two_level_dict, 'foo:bar', {'not_found': 'not_found'}
)
)
# Check traversing too far
self.assertDictEqual(
{'not_found': 'nope'},
salt.utils.data.traverse_dict_and_list(
test_two_level_dict_and_list, 'foo:bar', {'not_found': 'nope'}
)
)
# Check index 1 (2nd element) of list corresponding to path 'foo'
self.assertEqual(
'baz',
salt.utils.data.traverse_dict_and_list(
test_two_level_dict_and_list, 'foo:1', {'not_found': 'not_found'}
)
)
# Traverse a couple times into dicts embedded in lists
self.assertEqual(
'sit',
salt.utils.data.traverse_dict_and_list(
test_two_level_dict_and_list,
'foo:lorem:ipsum:dolor',
{'not_found': 'not_found'}
)
)
def test_compare_dicts(self):
ret = salt.utils.data.compare_dicts(old={'foo': 'bar'}, new={'foo': 'bar'})
self.assertEqual(ret, {})
ret = salt.utils.data.compare_dicts(old={'foo': 'bar'}, new={'foo': 'woz'})
expected_ret = {'foo': {'new': 'woz', 'old': 'bar'}}
self.assertDictEqual(ret, expected_ret)
def test_decode(self):
'''
Companion to test_decode_to_str, they should both be kept up-to-date
with one another.
NOTE: This uses the lambda "_b" defined above in the global scope,
which encodes a string to a bytestring, assuming utf-8.
'''
expected = [
'unicode_str',
'питон',
123,
456.789,
True,
False,
None,
'яйца',
BYTES,
[123, 456.789, 'спам', True, False, None, 'яйца', BYTES],
(987, 654.321, 'яйца', 'яйца', None, (True, 'яйца', BYTES)),
{'str_key': 'str_val',
None: True,
123: 456.789,
'яйца': BYTES,
'subdict': {'unicode_key': 'яйца',
'tuple': (123, 'hello', 'world', True, 'яйца', BYTES),
'list': [456, 'спам', False, 'яйца', BYTES]}},
OrderedDict([('foo', 'bar'), (123, 456), ('яйца', BYTES)])
]
ret = salt.utils.data.decode(
self.test_data,
keep=True,
normalize=True,
preserve_dict_class=True,
preserve_tuples=True)
self.assertEqual(ret, expected)
# The binary data in the data structure should fail to decode, even
# using the fallback, and raise an exception.
self.assertRaises(
UnicodeDecodeError,
salt.utils.data.decode,
self.test_data,
keep=False,
normalize=True,
preserve_dict_class=True,
preserve_tuples=True)
# Now munge the expected data so that we get what we would expect if we
# disable preservation of dict class and tuples
expected[10] = [987, 654.321, 'яйца', 'яйца', None, [True, 'яйца', BYTES]]
expected[11]['subdict']['tuple'] = [123, 'hello', 'world', True, 'яйца', BYTES]
expected[12] = {'foo': 'bar', 123: 456, 'яйца': BYTES}
ret = salt.utils.data.decode(
self.test_data,
keep=True,
normalize=True,
preserve_dict_class=False,
preserve_tuples=False)
self.assertEqual(ret, expected)
# Now test single non-string, non-data-structure items, these should
# return the same value when passed to this function
for item in (123, 4.56, True, False, None):
log.debug('Testing decode of %s', item)
self.assertEqual(salt.utils.data.decode(item), item)
# Test single strings (not in a data structure)
self.assertEqual(salt.utils.data.decode('foo'), 'foo')
self.assertEqual(salt.utils.data.decode(_b('bar')), 'bar')
self.assertEqual(salt.utils.data.decode(EGGS, normalize=True), 'яйца')
self.assertEqual(salt.utils.data.decode(EGGS, normalize=False), EGGS)
# Test binary blob
self.assertEqual(salt.utils.data.decode(BYTES, keep=True), BYTES)
self.assertRaises(
UnicodeDecodeError,
salt.utils.data.decode,
BYTES,
keep=False)
def test_decode_to_str(self):
'''
Companion to test_decode, they should both be kept up-to-date with one
another.
NOTE: This uses the lambda "_s" defined above in the global scope,
which converts the string/bytestring to a str type.
'''
expected = [
_s('unicode_str'),
_s('питон'),
123,
456.789,
True,
False,
None,
_s('яйца'),
BYTES,
[123, 456.789, _s('спам'), True, False, None, _s('яйца'), BYTES],
(987, 654.321, _s('яйца'), _s('яйца'), None, (True, _s('яйца'), BYTES)),
{_s('str_key'): _s('str_val'),
None: True,
123: 456.789,
_s('яйца'): BYTES,
_s('subdict'): {
_s('unicode_key'): _s('яйца'),
_s('tuple'): (123, _s('hello'), _s('world'), True, _s('яйца'), BYTES),
_s('list'): [456, _s('спам'), False, _s('яйца'), BYTES]}},
OrderedDict([(_s('foo'), _s('bar')), (123, 456), (_s('яйца'), BYTES)])
]
ret = salt.utils.data.decode(
self.test_data,
keep=True,
normalize=True,
preserve_dict_class=True,
preserve_tuples=True,
to_str=True)
self.assertEqual(ret, expected)
if six.PY3:
# The binary data in the data structure should fail to decode, even
# using the fallback, and raise an exception.
self.assertRaises(
UnicodeDecodeError,
salt.utils.data.decode,
self.test_data,
keep=False,
normalize=True,
preserve_dict_class=True,
preserve_tuples=True,
to_str=True)
# Now munge the expected data so that we get what we would expect if we
# disable preservation of dict class and tuples
expected[10] = [987, 654.321, _s('яйца'), _s('яйца'), None, [True, _s('яйца'), BYTES]]
expected[11][_s('subdict')][_s('tuple')] = [123, _s('hello'), _s('world'), True, _s('яйца'), BYTES]
expected[12] = {_s('foo'): _s('bar'), 123: 456, _s('яйца'): BYTES}
ret = salt.utils.data.decode(
self.test_data,
keep=True,
normalize=True,
preserve_dict_class=False,
preserve_tuples=False,
to_str=True)
self.assertEqual(ret, expected)
# Now test single non-string, non-data-structure items, these should
# return the same value when passed to this function
for item in (123, 4.56, True, False, None):
log.debug('Testing decode of %s', item)
self.assertEqual(salt.utils.data.decode(item, to_str=True), item)
# Test single strings (not in a data structure)
self.assertEqual(salt.utils.data.decode('foo', to_str=True), _s('foo'))
self.assertEqual(salt.utils.data.decode(_b('bar'), to_str=True), _s('bar'))
# Test binary blob
self.assertEqual(
salt.utils.data.decode(BYTES, keep=True, to_str=True),
BYTES
)
if six.PY3:
self.assertRaises(
UnicodeDecodeError,
salt.utils.data.decode,
BYTES,
keep=False,
to_str=True)
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_decode_fallback(self):
'''
Test fallback to utf-8
'''
with patch.object(builtins, '__salt_system_encoding__', 'ascii'):
self.assertEqual(salt.utils.data.decode(_b('яйца')), 'яйца')
def test_encode(self):
'''
NOTE: This uses the lambda "_b" defined above in the global scope,
which encodes a string to a bytestring, assuming utf-8.
'''
expected = [
_b('unicode_str'),
_b('питон'),
123,
456.789,
True,
False,
None,
_b(EGGS),
BYTES,
[123, 456.789, _b('спам'), True, False, None, _b(EGGS), BYTES],
(987, 654.321, _b('яйца'), _b(EGGS), None, (True, _b(EGGS), BYTES)),
{_b('str_key'): _b('str_val'),
None: True,
123: 456.789,
_b(EGGS): BYTES,
_b('subdict'): {_b('unicode_key'): _b(EGGS),
_b('tuple'): (123, _b('hello'), _b('world'), True, _b(EGGS), BYTES),
_b('list'): [456, _b('спам'), False, _b(EGGS), BYTES]}},
OrderedDict([(_b('foo'), _b('bar')), (123, 456), (_b(EGGS), BYTES)])
]
# Both keep=True and keep=False should work because the BYTES data is
# already bytes.
ret = salt.utils.data.encode(
self.test_data,
keep=True,
preserve_dict_class=True,
preserve_tuples=True)
self.assertEqual(ret, expected)
ret = salt.utils.data.encode(
self.test_data,
keep=False,
preserve_dict_class=True,
preserve_tuples=True)
self.assertEqual(ret, expected)
# Now munge the expected data so that we get what we would expect if we
# disable preservation of dict class and tuples
expected[10] = [987, 654.321, _b('яйца'), _b(EGGS), None, [True, _b(EGGS), BYTES]]
expected[11][_b('subdict')][_b('tuple')] = [
123, _b('hello'), _b('world'), True, _b(EGGS), BYTES
]
expected[12] = {_b('foo'): _b('bar'), 123: 456, _b(EGGS): BYTES}
ret = salt.utils.data.encode(
self.test_data,
keep=True,
preserve_dict_class=False,
preserve_tuples=False)
self.assertEqual(ret, expected)
ret = salt.utils.data.encode(
self.test_data,
keep=False,
preserve_dict_class=False,
preserve_tuples=False)
self.assertEqual(ret, expected)
# Now test single non-string, non-data-structure items, these should
# return the same value when passed to this function
for item in (123, 4.56, True, False, None):
log.debug('Testing encode of %s', item)
self.assertEqual(salt.utils.data.encode(item), item)
# Test single strings (not in a data structure)
self.assertEqual(salt.utils.data.encode('foo'), _b('foo'))
self.assertEqual(salt.utils.data.encode(_b('bar')), _b('bar'))
# Test binary blob, nothing should happen even when keep=False since
# the data is already bytes
self.assertEqual(salt.utils.data.encode(BYTES, keep=True), BYTES)
self.assertEqual(salt.utils.data.encode(BYTES, keep=False), BYTES)
def test_encode_keep(self):
'''
Whereas we tested the keep argument in test_decode, it is much easier
to do a more comprehensive test of keep in its own function where we
can force the encoding.
'''
unicode_str = 'питон'
encoding = 'ascii'
# Test single string
self.assertEqual(
salt.utils.data.encode(unicode_str, encoding, keep=True),
unicode_str)
self.assertRaises(
UnicodeEncodeError,
salt.utils.data.encode,
unicode_str,
encoding,
keep=False)
data = [
unicode_str,
[b'foo', [unicode_str], {b'key': unicode_str}, (unicode_str,)],
{b'list': [b'foo', unicode_str],
b'dict': {b'key': unicode_str},
b'tuple': (b'foo', unicode_str)},
([b'foo', unicode_str], {b'key': unicode_str}, (unicode_str,))
]
# Since everything was a bytestring aside from the bogus data, the
# return data should be identical. We don't need to test recursive
# decoding, that has already been tested in test_encode.
self.assertEqual(
salt.utils.data.encode(data, encoding,
keep=True, preserve_tuples=True),
data
)
self.assertRaises(
UnicodeEncodeError,
salt.utils.data.encode,
data,
encoding,
keep=False,
preserve_tuples=True)
for index, item in enumerate(data):
self.assertEqual(
salt.utils.data.encode(data[index], encoding,
keep=True, preserve_tuples=True),
data[index]
)
self.assertRaises(
UnicodeEncodeError,
salt.utils.data.encode,
data[index],
encoding,
keep=False,
preserve_tuples=True)
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_encode_fallback(self):
'''
Test fallback to utf-8
'''
with patch.object(builtins, '__salt_system_encoding__', 'ascii'):
self.assertEqual(salt.utils.data.encode('яйца'), _b('яйца'))
with patch.object(builtins, '__salt_system_encoding__', 'CP1252'):
self.assertEqual(salt.utils.data.encode('Ψ'), _b('Ψ'))
def test_repack_dict(self):
list_of_one_element_dicts = [{'dict_key_1': 'dict_val_1'},
{'dict_key_2': 'dict_val_2'},
{'dict_key_3': 'dict_val_3'}]
expected_ret = {'dict_key_1': 'dict_val_1',
'dict_key_2': 'dict_val_2',
'dict_key_3': 'dict_val_3'}
ret = salt.utils.data.repack_dictlist(list_of_one_element_dicts)
self.assertDictEqual(ret, expected_ret)
# Try with yaml
yaml_key_val_pair = '- key1: val1'
ret = salt.utils.data.repack_dictlist(yaml_key_val_pair)
self.assertDictEqual(ret, {'key1': 'val1'})
# Make sure we handle non-yaml junk data
ret = salt.utils.data.repack_dictlist(LOREM_IPSUM)
self.assertDictEqual(ret, {})
def test_stringify(self):
self.assertRaises(TypeError, salt.utils.data.stringify, 9)
self.assertEqual(
salt.utils.data.stringify(['one', 'two', str('three'), 4, 5]), # future lint: disable=blacklisted-function
['one', 'two', 'three', '4', '5']
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 14:47:33 2019
@author: Matteo Papini
"""
import torch
import gym
import potion.envs
from potion.actors.discrete_policies import ShallowGibbsPolicy
from potion.common.logger import Logger
from potion.algorithms.safe import spg
import argparse
import re
from potion.common.rllab_utils import rllab_env_from_name, Rllab2GymWrapper
from potion.meta.smoothing_constants import gibbs_lip_const
from potion.meta.error_bounds import hoeffding_bounded_score
# Command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--name', help='Experiment name', type=str, default='SPG')
parser.add_argument('--storage', help='root of log directories', type=str, default='..')
parser.add_argument('--estimator', help='PG estimator (reinforce/gpomdp)',
type=str, default='gpomdp')
parser.add_argument('--baseline', help='control variate (avg/peters/zero)',
type=str, default='peters')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--env', help='Gym environment id', type=str,
default='GridWorld-v0')
parser.add_argument('--horizon', help='Task horizon', type=int, default=10)
parser.add_argument('--max_samples', help='Maximum total samples', type=int,
default=1e7)
parser.add_argument('--mini_batchsize', help='(Minimum) batch size', type=int,
default=100)
parser.add_argument('--max_batchsize', help='Maximum batch size', type=int,
default=100000)
parser.add_argument('--disc', help='Discount factor', type=float, default=0.9)
parser.add_argument('--conf', help='Confidence', type=float, default=0.8)
parser.add_argument('--std_init', help='Initial policy std', type=float,
default=1.)
parser.add_argument('--max_feat', help='Maximum state feature', type=float,
default=1.)
parser.add_argument('--max_rew', help='Maximum reward', type=float,
default=1.)
parser.add_argument("--fast", help="speed up",
action="store_true")
parser.add_argument("--no-fast", help="Do not speed up",
action="store_false")
parser.add_argument("--render", help="Render an episode",
action="store_true")
parser.add_argument("--no-render", help="Do not render any episode",
action="store_false")
parser.add_argument("--temp", help="Save logs in temp folder",
action="store_true")
parser.add_argument("--no-temp", help="Save logs in logs folder",
action="store_false")
parser.set_defaults(fast=True, render=False, temp=False)
args = parser.parse_args()
# Prepare
if args.env.startswith('rllab'):
env_rllab_class = rllab_env_from_name(args.env)
env_rllab = env_rllab_class()
env = Rllab2GymWrapper(env_rllab)
else:
env = gym.make(args.env)
env.seed(args.seed)
m = sum(env.observation_space.shape)
d = sum(env.action_space.shape)
mu_init = torch.zeros(m)
logstd_init = torch.log(torch.zeros(1) + args.std_init)
policy = ShallowGibbsPolicy(env,
temp=1.)
envname = re.sub(r'[^a-zA-Z]', "", args.env)[:-1].lower()
logname = envname + '_' + args.name + '_' + str(args.seed)
if args.temp:
logger = Logger(directory= args.storage + '/temp', name = logname, modes=['human', 'csv'])
else:
logger = Logger(directory=args.storage + '/logs', name = logname, modes=['human', 'csv'])
#Constants
lip_const = gibbs_lip_const(args.max_feat, args.max_rew, args.disc,
1.)
print(lip_const)
score_bound = 2 * args.max_feat
err_bound = hoeffding_bounded_score(args.max_rew, score_bound, args.disc, args.horizon,
dim=16, estimator=args.estimator)
# Run
spg(env, policy, args.horizon, lip_const, err_bound,
fail_prob = 1. - args.conf,
mini_batchsize = args.mini_batchsize,
max_batchsize = args.max_batchsize,
max_samples = args.max_samples,
disc = args.disc,
fast = args.fast,
seed = args.seed,
logger = logger,
render = args.render,
shallow = True,
estimator = args.estimator,
baseline = args.baseline,
log_params=False,
save_params=False)
|
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = 'planguru.core'
verbose_name = "Core"
def ready(self):
        import planguru.core.signals  # noqa: F401 -- imported for its signal-registration side effects
|
from tensorflow.keras.layers import Input, Flatten, concatenate, Activation
from tensorflow.keras.layers import Dense, Conv2D
class Topology:
"""Base class for creating headless Keras computation graphs with arbitrary architecture.
Input layer is pre-defined, and resides in `self.input`,
to be used by user-defined child method.
Child init methods are not required.
Any parameters required by the graph definition, e.g. layer sizes, should be made
parameters for the `define_graph` method. They will become the initialization
parameters of the class.
For example, if you wanted `layer_sizes, activation` to be the parameters required
to define the model, you would make those the parameters of the
child method `define_graph`, and then initialize
the object with `Topology(layer_sizes, activation)`.
Parameters
----------
**define_graph_kwargs
kwargs to be passed to define_graph method.
"""
def __init__(self, **define_graph_kwargs):
"""Store arguments for parameters required by define_graph in a dictionary.
Note: args must be passed to init as kwargs, not positional args."""
# Part 1: the keywords and args passed are composed into a dictionary and stored
self.define_graph_kwargs = define_graph_kwargs
def configure(self, agent):
"""Set up input and output layers and store them in member variables.
Input is configured according to the associated agent.
Output is the return value of define_graph, as defined by the child class.
Parameters
----------
agent : pavlov.Agent
Agent to associate with model.
"""
self.input = Input(shape=agent.state_pipeline.out_dims,
dtype=agent.replay_buffer.state_dtype)
# Part 2: the dictionary of args is unpacked and passed to define_graph
self.output = self.define_graph(**self.define_graph_kwargs)
def define_graph(self, **define_graph_kwargs):
"""Define any arbitrary Keras graph, making use of the kwargs passed to init.
Parameters
----------
**define_graph_kwargs
Can be any arguments necessary to define a model.
Raises
------
NotImplementedError.
"""
# Part 3: define_graph is necessarily declared with the right keyword args
raise NotImplementedError
class CNNTopology(Topology):
"""Creates headless Keras computation graph for a convolutional architecture.
Starts with convolutional layers according to `conv_layer_sizes`, `kernel_sizes`, and `strides`.
Flattens those, then passes flattened vector to dense layers according to `fc_layer_sizes`.
Uses `activation` as activation along the way.
Always uses `same` padding.
The input shape is discovered at configuration time, from the state pipeline.
"""
def define_graph(self, conv_layer_sizes, fc_layer_sizes, kernel_sizes, strides, activation):
"""Defines headless Keras graph for a CNN.
Parameters
----------
conv_layer_sizes : list of int
number of filters in each convolutional layer.
fc_layer_sizes : list of int
number of units in each dense layer.
kernel_sizes : list of 2-tuple of int
2-dimensional size of convolutional kernel.
strides : list of 2-tuple of int
dimensions of stride in convolution.
activation : str
activation function to be used in each layer.
Returns
-------
out : keras.Layer
Final Keras layer of headless architecture, to be passed on to the final layers.
"""
out = self.input
for l_size, k_size, stride in zip(conv_layer_sizes, kernel_sizes, strides):
out = Conv2D(filters=l_size, kernel_size=k_size, padding='same',
strides=stride, activation=activation)(out)
out = Flatten()(out)
for l_size in fc_layer_sizes:
out = Dense(l_size, activation=activation)(out)
return out
class DenseTopology(Topology):
"""Creates headless Keras computation graph for a fully connected architecture.
Passes input through a series of dense layers, with chosen `activation`.
"""
def define_graph(self, layer_sizes, activation):
"""Defines headless Keras graph for a fully connected network.
Parameters
----------
layer_sizes : list of int
number of units in each dense layer.
activation : str
activation function to be used in each layer.
Returns
-------
out : keras.Layer
Final Keras layer of headless architecture, to be passed on to the final layers.
"""
out = self.input
        for layer_size in layer_sizes:
            out = Dense(layer_size, activation=activation)(out)
return out
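# Minimal usage sketch (layer sizes are illustrative; `agent` is assumed to be a
# configured pavlov.Agent, which Topology.configure needs for the input shape/dtype):
#
#     topology = DenseTopology(layer_sizes=[64, 64], activation='relu')
#     topology.configure(agent)   # builds topology.input and topology.output
#     # topology.output is a headless graph, ready to be capped with task-specific layers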
|
from datetime import datetime
import os
import urllib.request
import magic
import re
from flask import current_app
from notifications_utils.recipients import (
validate_and_format_phone_number,
validate_and_format_email_address
)
from notifications_utils.template import HTMLEmailTemplate, PlainTextEmailTemplate, SMSMessageTemplate
from app import clients, statsd_client, create_uuid
from app.dao.notifications_dao import (
dao_update_notification
)
from app.dao.provider_details_dao import (
get_provider_details_by_notification_type,
dao_toggle_sms_provider
)
from app.celery.research_mode_tasks import send_sms_response, send_email_response
from app.dao.templates_dao import dao_get_template_by_id
from app.exceptions import NotificationTechnicalFailureException, MalwarePendingException
from app.models import (
SMS_TYPE,
KEY_TYPE_TEST,
BRANDING_BOTH_EN,
BRANDING_BOTH_FR,
BRANDING_ORG_BANNER_NEW,
EMAIL_TYPE,
NOTIFICATION_TECHNICAL_FAILURE,
NOTIFICATION_VIRUS_SCAN_FAILED,
NOTIFICATION_CONTAINS_PII,
NOTIFICATION_SENDING,
NOTIFICATION_SENT,
)
from app.clients.mlwr.mlwr import check_mlwr_score
from app.utils import get_logo_url
def send_sms_to_provider(notification):
service = notification.service
if not service.active:
technical_failure(notification=notification)
return
if notification.status == 'created':
provider = provider_to_use(
SMS_TYPE,
notification.id,
notification.international,
notification.reply_to_text
)
template_model = dao_get_template_by_id(notification.template_id, notification.template_version)
template = SMSMessageTemplate(
template_model.__dict__,
values=notification.personalisation,
prefix=service.name,
show_prefix=service.prefix_sms,
)
if service.research_mode or notification.key_type == KEY_TYPE_TEST:
update_notification_to_sending(notification, provider)
send_sms_response(provider.get_name(), str(notification.id), notification.to)
else:
try:
reference = provider.send_sms(
to=validate_and_format_phone_number(notification.to, international=notification.international),
content=str(template),
reference=str(notification.id),
sender=notification.reply_to_text
)
except Exception as e:
notification.billable_units = template.fragment_count
dao_update_notification(notification)
dao_toggle_sms_provider(provider.name)
raise e
else:
notification.reference = reference
notification.billable_units = template.fragment_count
update_notification_to_sending(notification, provider)
delta_milliseconds = (datetime.utcnow() - notification.created_at).total_seconds() * 1000
statsd_client.timing("sms.total-time", delta_milliseconds)
def send_email_to_provider(notification):
service = notification.service
if not service.active:
technical_failure(notification=notification)
return
if notification.status == 'created':
provider = provider_to_use(EMAIL_TYPE, notification.id)
# Extract any file objects from the personalization
file_keys = [
k for k, v in (notification.personalisation or {}).items() if isinstance(v, dict) and 'document' in v
]
attachments = []
personalisation_data = notification.personalisation.copy()
for key in file_keys:
            # Check if an MLWR sid exists
if (current_app.config["MLWR_HOST"] and
'mlwr_sid' in personalisation_data[key]['document'] and
personalisation_data[key]['document']['mlwr_sid'] != "false"):
mlwr_result = check_mlwr(personalisation_data[key]['document']['mlwr_sid'])
if "state" in mlwr_result and mlwr_result["state"] == "completed":
# Update notification that it contains malware
if "submission" in mlwr_result and mlwr_result["submission"]['max_score'] >= 500:
malware_failure(notification=notification)
return
else:
# Raise so Celery will retry in sixty seconds
raise MalwarePendingException
try:
req = urllib.request.Request(personalisation_data[key]['document']['direct_file_url'])
with urllib.request.urlopen(req) as response:
buffer = response.read()
mime_type = magic.from_buffer(buffer, mime=True)
if mime_type == 'application/pdf':
attachments.append({"name": "{}.pdf".format(key), "data": buffer})
except Exception:
current_app.logger.error(
"Could not download and attach {}".format(personalisation_data[key]['document']['direct_file_url'])
)
personalisation_data[key] = personalisation_data[key]['document']['url']
template_dict = dao_get_template_by_id(notification.template_id, notification.template_version).__dict__
# Local Jinja support - Add USE_LOCAL_JINJA_TEMPLATES=True to .env
# Add a folder to the project root called 'jinja_templates'
# with a copy of 'email_template.jinja2' from notification-utils repo
debug_template_path = (os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.environ.get('USE_LOCAL_JINJA_TEMPLATES') == 'True' else None)
html_email = HTMLEmailTemplate(
template_dict,
values=personalisation_data,
jinja_path=debug_template_path,
**get_html_email_options(service)
)
plain_text_email = PlainTextEmailTemplate(
template_dict,
values=personalisation_data
)
if current_app.config["SCAN_FOR_PII"]:
contains_pii(notification, str(plain_text_email))
if service.research_mode or notification.key_type == KEY_TYPE_TEST:
notification.reference = str(create_uuid())
update_notification_to_sending(notification, provider)
send_email_response(notification.reference, notification.to)
else:
if service.sending_domain is None or service.sending_domain.strip() == "":
sending_domain = current_app.config['NOTIFY_EMAIL_DOMAIN']
else:
sending_domain = service.sending_domain
from_address = '"{}" <{}@{}>'.format(service.name, service.email_from,
sending_domain)
email_reply_to = notification.reply_to_text
reference = provider.send_email(
from_address,
validate_and_format_email_address(notification.to),
plain_text_email.subject,
body=str(plain_text_email),
html_body=str(html_email),
reply_to_address=validate_and_format_email_address(email_reply_to) if email_reply_to else None,
attachments=attachments
)
notification.reference = reference
update_notification_to_sending(notification, provider)
delta_milliseconds = (datetime.utcnow() - notification.created_at).total_seconds() * 1000
statsd_client.timing("email.total-time", delta_milliseconds)
def update_notification_to_sending(notification, provider):
notification.sent_at = datetime.utcnow()
notification.sent_by = provider.get_name()
notification.status = NOTIFICATION_SENT if notification.notification_type == "sms" else NOTIFICATION_SENDING
dao_update_notification(notification)
def provider_to_use(notification_type, notification_id, international=False, sender=None):
active_providers_in_order = [
p for p in get_provider_details_by_notification_type(notification_type, international) if p.active
]
if not active_providers_in_order:
current_app.logger.error(
"{} {} failed as no active providers".format(notification_type, notification_id)
)
raise Exception("No active {} providers".format(notification_type))
return clients.get_client_by_name_and_type(active_providers_in_order[0].identifier, notification_type)
def get_html_email_options(service):
if service.email_branding is None:
if service.default_branding_is_french is True:
return {
'fip_banner_english': False,
'fip_banner_french': True,
'logo_with_background_colour': False,
}
else:
return {
'fip_banner_english': True,
'fip_banner_french': False,
'logo_with_background_colour': False,
}
logo_url = get_logo_url(
service.email_branding.logo
) if service.email_branding.logo else None
return {
'fip_banner_english': service.email_branding.brand_type == BRANDING_BOTH_EN,
'fip_banner_french': service.email_branding.brand_type == BRANDING_BOTH_FR,
'logo_with_background_colour': service.email_branding.brand_type == BRANDING_ORG_BANNER_NEW,
'brand_colour': service.email_branding.colour,
'brand_logo': logo_url,
'brand_text': service.email_branding.text,
'brand_name': service.email_branding.name,
}
def technical_failure(notification):
notification.status = NOTIFICATION_TECHNICAL_FAILURE
dao_update_notification(notification)
raise NotificationTechnicalFailureException(
"Send {} for notification id {} to provider is not allowed: service {} is inactive".format(
notification.notification_type,
notification.id,
notification.service_id))
def malware_failure(notification):
notification.status = NOTIFICATION_VIRUS_SCAN_FAILED
dao_update_notification(notification)
raise NotificationTechnicalFailureException(
"Send {} for notification id {} to provider is not allowed. Notification contains malware".format(
notification.notification_type,
notification.id))
def check_mlwr(sid):
return check_mlwr_score(sid)
def contains_pii(notification, text_content):
# Look for potential Social Insurance Numbers (###-###-### surrounded by whitespace)
# and confirm each candidate with the Luhn checksum before failing the notification.
for sin in re.findall(r'\s\d{3}-\d{3}-\d{3}\s', text_content):
if luhn(sin.replace("-", "").strip()):
fail_pii(notification, "Social Insurance Number")
return
def fail_pii(notification, pii_type):
notification.status = NOTIFICATION_CONTAINS_PII
dao_update_notification(notification)
raise NotificationTechnicalFailureException(
"Send {} for notification id {} to provider is not allowed. Notification contains PII: {}".format(
notification.notification_type,
notification.id,
pii_type))
def luhn(n):
# Standard Luhn checksum: working from the rightmost digit, double every second
# digit, sum the digits of those products plus the untouched digits, and check
# that the total is divisible by 10.
r = [int(ch) for ch in n][::-1]
return (sum(r[0::2]) + sum(sum(divmod(d * 2, 10)) for d in r[1::2])) % 10 == 0
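# Illustrative only (not part of the original module): how the Luhn helper behaves
# on the well-known test value 79927398713, and on the same value with its check
# digit altered.
if __name__ == "__main__":
    assert luhn("79927398713") is True    # valid checksum
    assert luhn("79927398710") is False   # altered final digit fails the check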
|
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import os
import sys
import time
from threading import Timer
import subprocess
from framework import S3PyCliTest
from framework import Config
from framework import logit
from shlex import quote
class S3cmdTest(S3PyCliTest):
def __init__(self, description):
self.s3cfg = os.path.join(os.path.dirname(os.path.realpath(__file__)), Config.config_file)
self._send_retries = " --max-retries=" + str(Config.s3cmd_max_retries) + " "
self.credentials = ""
super(S3cmdTest, self).__init__(description)
def setup(self):
if hasattr(self, 'filename') and hasattr(self, 'filesize'):
file_to_create = os.path.join(self.working_dir, self.filename)
logit("Creating file [%s] with size [%d]" % (file_to_create, self.filesize))
with open(file_to_create, 'wb') as fout:
fout.write(os.urandom(self.filesize))
super(S3cmdTest, self).setup()
def run(self):
super(S3cmdTest, self).run()
def teardown(self):
super(S3cmdTest, self).teardown()
def with_credentials(self, access_key, secret_key):
self.credentials = " --access_key=" + access_key +\
" --secret_key=" + secret_key
return self
def with_cli(self, cmd):
if Config.no_ssl:
cmd = cmd + " --no-ssl"
super(S3PyCliTest, self).with_cli(cmd)
def create_bucket(self, bucket_name, region=None, host=None, no_check_hostname=None):
self.bucket_name = bucket_name
if host:
if region:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + "--host=" +
host + " --host-bucket=" + bucket_name + "." + host + " mb " +
" s3://" + self.bucket_name + " --bucket-location=" + region)
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name
+ "." + host +" mb " + " s3://" + self.bucket_name)
else:
if region:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " mb " + " s3://" + self.bucket_name + " --bucket-location=" + region)
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " mb " + " s3://" + self.bucket_name)
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def list_buckets(self, host=None, no_check_hostname=None):
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + host + " ls ")
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " ls ")
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def info_bucket(self, bucket_name, host=None, no_check_hostname=None):
self.bucket_name = bucket_name
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" info " + " s3://" + self.bucket_name)
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " info " + " s3://" + self.bucket_name)
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def info_object(self, bucket_name, filename, host=None, no_check_hostname=None):
self.bucket_name = bucket_name
self.filename = filename
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" info " + quote("s3://" + self.bucket_name + "/" + self.filename ))
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " info " + quote("s3://" + self.bucket_name + "/" + self.filename))
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
return self
def delete_bucket(self, bucket_name, host=None, no_check_hostname=None):
self.bucket_name = bucket_name
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" rb " + " s3://" + self.bucket_name)
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " rb " + " s3://" + self.bucket_name)
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def list_objects(self, bucket_name, host=None, no_check_hostname=None):
self.bucket_name = bucket_name
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" ls " + " s3://" + self.bucket_name)
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " ls " + " s3://" + self.bucket_name)
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def list_all_objects(self):
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " la ")
return self
def list_specific_objects(self, bucket_name, object_pattern):
self.bucket_name = bucket_name
self.object_pattern = object_pattern
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " ls " + quote("s3://" + self.bucket_name + "/" + self.object_pattern))
return self
def disk_usage_bucket(self, bucket_name):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " du " + " s3://" + self.bucket_name)
return self
def upload_test(self, bucket_name, filename, filesize, host=None, no_check_hostname=None):
self.filename = filename
self.filesize = filesize
self.bucket_name = bucket_name
s3target = "s3://" + bucket_name;
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" put " + self.filename + " s3://" + self.bucket_name)
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " put " + quote(self.filename) + " " +
quote(s3target))
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def upload_copy_test(self, bucket_name, srcfile, destfile):
self.srcfile = srcfile
self.destfile = destfile
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " cp " + quote("s3://" + self.bucket_name + "/" + self.srcfile) + " " + quote("s3://" + self.bucket_name + "/" + self.destfile))
return self
def upload_move_test(self, src_bucket, srcfile, dest_bucket, destfile):
self.srcfile = srcfile
self.destfile = destfile
self.src_bucket = src_bucket
self.dest_bucket = dest_bucket
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " mv " + quote("s3://" + self.src_bucket + "/" + self.srcfile) + " " + quote("s3://" + self.dest_bucket + "/" + self.destfile))
return self
def list_multipart_uploads(self, bucket_name):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " multipart s3://" + self.bucket_name)
return self
def abort_multipart(self, bucket_name, filename, upload_id):
self.filename = filename
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " abortmp " + quote("s3://" + self.bucket_name + "/" + self.filename) + " " + upload_id)
return self
def list_parts(self, bucket_name, filename, upload_id):
self.filename = filename
self.bucket_name = bucket_name
s3target = "s3://" + bucket_name + "/" + filename;
cmd = "s3cmd --no-mime-magic -c %s %s listmp %s %s" % (self.s3cfg, self._send_retries,
quote(s3target), upload_id)
self.with_cli(cmd)
return self
def download_test(self, bucket_name, filename, host=None, no_check_hostname=None):
self.filename = filename
self.bucket_name = bucket_name
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" get " + quote("s3://" + self.bucket_name + "/" + self.filename))
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " get " + quote("s3://" + self.bucket_name + "/" + self.filename))
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
return self
def setacl_bucket(self, bucket_name, acl_perm):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " setacl " + "s3://" + self.bucket_name + " --acl-grant=" + acl_perm)
return self
def setpolicy_bucket(self, bucket_name, policyfile):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " setpolicy " + self.working_dir + "/../" + policyfile + " s3://" + self.bucket_name)
return self
def delpolicy_bucket(self, bucket_name):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " delpolicy " + " s3://" + self.bucket_name)
return self
def accesslog_bucket(self, bucket_name):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " accesslog " + " s3://" + self.bucket_name)
return self
def fixbucket(self, bucket_name):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " fixbucket " + " s3://" + self.bucket_name)
return self
def setacl_object(self, bucket_name, filename, acl_perm):
self.filename = filename
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " setacl " + "s3://" + self.bucket_name + "/" + self.filename + " --acl-grant=" + acl_perm)
return self
def revoke_acl_bucket(self, bucket_name, acl_perm):
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " setacl " + "s3://" + self.bucket_name + " --acl-revoke=" + acl_perm)
return self
def revoke_acl_object(self, bucket_name, filename, acl_perm):
self.filename = filename
self.bucket_name = bucket_name
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " setacl " + "s3://" + self.bucket_name + "/" + self.filename + " --acl-revoke=" + acl_perm)
return self
def stop_s3authserver_test(self):
cmd = "sudo systemctl stop s3authserver.service";
self.with_cli(cmd)
return self
def start_s3authserver_test(self):
cmd = "sudo systemctl start s3authserver.service";
self.with_cli(cmd)
return self
def delete_test(self, bucket_name, filename, host=None, no_check_hostname=None):
self.filename = filename
self.bucket_name = bucket_name
if host:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries +
"--host=" + host + " --host-bucket=" + bucket_name + "." + host +
" del " + quote("s3://" + self.bucket_name + "/" + self.filename))
else:
self.with_cli("s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " del " + quote("s3://" + self.bucket_name + "/" + self.filename))
if no_check_hostname:
self.command = self.command + " --no-check-hostname"
self.command = self.command + self.credentials
return self
def multi_delete_test(self, bucket_name, quiet_mode=False):
self.bucket_name = bucket_name
cmd = "s3cmd --no-mime-magic -c " + self.s3cfg + self._send_retries + " del " + "s3://" + self.bucket_name + "/ --recursive --force"
if quiet_mode:
cmd += " --quiet"
self.with_cli(cmd)
self.command = self.command + self.credentials
return self
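# Illustrative only: how these builder methods are typically chained in a test
# script. execute_test() and command_is_successful() are assumed to be provided
# by the S3PyCliTest base class in the framework module (they are not shown here).
S3cmdTest('s3cmd can create bucket') \
    .create_bucket("seagatebucket") \
    .execute_test() \
    .command_is_successful()
S3cmdTest('s3cmd can upload a 3k file') \
    .upload_test("seagatebucket", "3kfile", 3000) \
    .execute_test() \
    .command_is_successful()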
|
"""
.. module: lemur.plugins.lemur_cfssl.plugin
:platform: Unix
:synopsis: This module is responsible for communicating with the CFSSL private CA.
:copyright: (c) 2018 by Thomson Reuters
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Charles Hendrie <chad.hendrie@tr.com>
"""
import json
import requests
from flask import current_app
from lemur.common.utils import parse_certificate
from lemur.common.utils import get_authority_key
from lemur.plugins.bases import IssuerPlugin
from lemur.plugins import lemur_cfssl as cfssl
from lemur.extensions import metrics
class CfsslIssuerPlugin(IssuerPlugin):
title = 'CFSSL'
slug = 'cfssl-issuer'
description = 'Enables the creation of certificates by CFSSL private CA'
version = cfssl.VERSION
author = 'Charles Hendrie'
author_url = 'https://github.com/netflix/lemur.git'
def __init__(self, *args, **kwargs):
self.session = requests.Session()
super(CfsslIssuerPlugin, self).__init__(*args, **kwargs)
def create_certificate(self, csr, issuer_options):
"""
Creates a CFSSL certificate.
:param csr:
:param issuer_options:
:return:
"""
current_app.logger.info("Requesting a new cfssl certificate with csr: {0}".format(csr))
url = "{0}{1}".format(current_app.config.get('CFSSL_URL'), '/api/v1/cfssl/sign')
data = {'certificate_request': csr}
data = json.dumps(data)
response = self.session.post(url, data=data.encode(encoding='utf_8', errors='strict'))
if response.status_code > 399:
metrics.send('cfssl_create_certificate_failure', 'counter', 1)
raise Exception(
"Error creating cert. Please check your CFSSL API server")
response_json = json.loads(response.content.decode('utf_8'))
cert = response_json['result']['certificate']
parsed_cert = parse_certificate(cert)
metrics.send('cfssl_create_certificate_success', 'counter', 1)
return cert, current_app.config.get('CFSSL_INTERMEDIATE'), parsed_cert.serial_number
@staticmethod
def create_authority(options):
"""
Creates an authority, this authority is then used by Lemur to allow a user
to specify which Certificate Authority they want to sign their certificate.
:param options:
:return:
"""
role = {'username': '', 'password': '', 'name': 'cfssl'}
return current_app.config.get('CFSSL_ROOT'), "", [role]
def revoke_certificate(self, certificate, comments):
"""Revoke a CFSSL certificate."""
base_url = current_app.config.get('CFSSL_URL')
create_url = '{0}/api/v1/cfssl/revoke'.format(base_url)
data = '{"serial": "' + certificate.external_id + '","authority_key_id": "' + \
get_authority_key(certificate.body) + \
'", "reason": "superseded"}'
current_app.logger.debug("Revoking cert: {0}".format(data))
response = self.session.post(
create_url, data=data.encode(encoding='utf_8', errors='strict'))
if response.status_code > 399:
metrics.send('cfssl_revoke_certificate_failure', 'counter', 1)
raise Exception(
"Error revoking cert. Please check your CFSSL API server")
metrics.send('cfssl_revoke_certificate_success', 'counter', 1)
return response.json()
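# Illustrative only: the revoke payload above is assembled by string concatenation;
# an equivalent construction with json.dumps (same field names as in the code
# above) avoids any quoting or escaping pitfalls.
def build_revoke_payload(serial, authority_key_id, reason="superseded"):
    """Build the CFSSL revoke request body as a JSON string."""
    return json.dumps({
        "serial": serial,
        "authority_key_id": authority_key_id,
        "reason": reason,
    })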
|