code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import behave
@behave.when(u"I list triggers")
def step_impl(context):
context.trigger_list = context.service.triggers.list()
@behave.then(u'I receive a Trigger list of "{count}" objects')
def step_impl(context, count):
assert context.trigger_list.items_count == int(count)
if int(count) > 0:
for page in context.trigger_list:
for trigger in page:
assert isinstance(trigger, context.dl.entities.Trigger) or \
isinstance(trigger, context.dl.entities.trigger.CronTrigger)
| [
"behave.when",
"behave.then"
] | [((17, 48), 'behave.when', 'behave.when', (['u"""I list triggers"""'], {}), "(u'I list triggers')\n", (28, 48), False, 'import behave\n'), ((135, 196), 'behave.then', 'behave.then', (['u"""I receive a Trigger list of "{count}" objects"""'], {}), '(u\'I receive a Trigger list of "{count}" objects\')\n', (146, 196), False, 'import behave\n')] |
# Combinatoric selections
# https://projecteuler.net/problem=53
from collections import defaultdict
from copy import deepcopy
from itertools import permutations
from math import fmod, sqrt, factorial
from time import time
start = time()
f = [factorial(i) for i in range(101)]
ans = 0
for n in range(1, 101):
for r in range(1, n+1):
if f[n] / (f[r] * f[n-r]) >= 1000000: ans += 1
print(ans)
print(time() - start, "seconds") | [
"math.factorial",
"time.time"
] | [((232, 238), 'time.time', 'time', ([], {}), '()\n', (236, 238), False, 'from time import time\n'), ((245, 257), 'math.factorial', 'factorial', (['i'], {}), '(i)\n', (254, 257), False, 'from math import fmod, sqrt, factorial\n'), ((411, 417), 'time.time', 'time', ([], {}), '()\n', (415, 417), False, 'from time import time\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
class animacija2D:
def __init__(self, f, xInterval, yInterval, fN=20):
""" Priprava grafa in skiciranje funkcije. """
self.f = f
self.xlim = xInterval
self.ylim = yInterval
self.fN = fN
self.runs = []
x = np.linspace(self.xlim[0], self.xlim[1], 30)
y = np.linspace(self.ylim[0], self.ylim[1], 30)
X, Y = np.meshgrid(x, y)
fxy = np.zeros(X.shape)
for i in range(len(fxy)):
for j in range(len(fxy[0])):
fxy[i,j] = self.f([X[i,j], Y[i,j]])
self.fig = plt.figure()
self.ax = self.fig.add_subplot(projection='3d')
self.ax.plot_surface(X, Y, fxy, cmap=cm.coolwarm, linewidth=0, antialiased=False, alpha=0.5)
self.ax.set_xlabel('x')
self.ax.set_ylabel('y')
self.ax.set_zlabel('f(x,y)')
self.ax.set_xlim(self.xlim)
self.ax.set_ylim(self.ylim)
zlim = [np.amin(fxy), np.amax(fxy)]
self.zlim = (zlim[0]-0.1*abs(zlim[1]-zlim[0]), zlim[1]+0.1*abs(zlim[1]-zlim[0]))
self.ax.set_zlim(self.zlim)
def racunaj(self, metoda, x0, y0, par, N=10, eps=1e-3, konv=False):
""" Priročna funkcija za iteriranje oz. večkratno korakanje.
Funkcija se lahko uporablja za končno število korakov: konv = False,
ali pa dokler ne konvergira za dano vrednost eps: konv = True """
tabPoints = []
count = 0
if konv:
minimum = self.f([x0, y0])
while True and count < 1000:
xN, yN, par = metoda(self.f, x0, y0, par)
tabPoints.append( [x0, y0, self.f([x0, y0])] )
x0 = xN
y0 = yN
fxyN = self.f([x0, y0])
if abs(minimum-fxyN) < eps: break
minimum = min(minimum, fxyN)
count += 1
else:
for i in range(N+1):
xN, yN, par = metoda(self.f, x0, y0, par)
tabPoints.append( [x0, y0, self.f([x0, y0])] )
x0 = xN
y0 = yN
count += 1
self.runs.append( tabPoints )
print((x0, y0), self.f([x0, y0]), count)
return x0, y0
def zacetekAnimacije(self):
""" Podmetoda za zacetek animacije. """
self.fig.suptitle("0")
self.artists = []
artists = []
for j in range(len(self.runs)):
sc, = self.ax.plot( self.runs[j][0][0], self.runs[j][0][1], self.runs[j][0][2], linestyle="", marker="o" )
self.artists.append( sc )
artists.append(sc)
return artists
def animiraj(self, i):
""" Podmetoda za animiranje. """
self.fig.suptitle(str(i))
artists = []
for j in range(len(self.runs)):
col = self.artists[j].get_color()
if i == len(self.runs[j])-1:
vline = self.ax.plot([self.runs[j][-1][0],self.runs[j][-1][0]], [self.runs[j][-1][1],self.runs[j][-1][1]], [self.zlim[0], self.zlim[1]], linestyle="--", color=col)
artists.append(vline)
elif i >= len(self.runs[j]): continue
if self.verbose == 0:
self.artists[j].set_data( self.runs[j][i][0], self.runs[j][i][1])
self.artists[j].set_3d_properties( self.runs[j][i][2] )
artists.append( self.artists[j] )
elif self.verbose == 1:
arw = self.ax.quiver( self.runs[j][i-1][0], self.runs[j][i-1][1], self.runs[j][i-1][2], self.runs[j][i][0]-self.runs[j][i-1][0], self.runs[j][i][1]-self.runs[j][i-1][1], self.runs[j][i][2]-self.runs[j][i-1][2], color=col)
self.artists.append( arw )
artists.append(arw)
return artists
def maxIteration(self):
""" Podmetoda za izračun števila slik. """
maxN = 0
for i in range(len(self.runs)):
maxN = max(maxN, len(self.runs[i]))
return maxN
def narisi(self, casAnimacije=500, verbose=0, save=False):
""" Funkcija za risanje animacij. """
self.verbose = verbose
ani = animation.FuncAnimation(self.fig, self.animiraj, np.arange(1, self.maxIteration()), interval=casAnimacije, init_func=self.zacetekAnimacije, repeat=False)
if save != False: ani.save(save+".gif", dpi=80, writer="imagemagick")
plt.show() | [
"numpy.amin",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.meshgrid",
"numpy.amax",
"matplotlib.pyplot.show"
] | [((386, 429), 'numpy.linspace', 'np.linspace', (['self.xlim[0]', 'self.xlim[1]', '(30)'], {}), '(self.xlim[0], self.xlim[1], 30)\n', (397, 429), True, 'import numpy as np\n'), ((442, 485), 'numpy.linspace', 'np.linspace', (['self.ylim[0]', 'self.ylim[1]', '(30)'], {}), '(self.ylim[0], self.ylim[1], 30)\n', (453, 485), True, 'import numpy as np\n'), ((501, 518), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (512, 518), True, 'import numpy as np\n'), ((533, 550), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (541, 550), True, 'import numpy as np\n'), ((698, 710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (708, 710), True, 'import matplotlib.pyplot as plt\n'), ((4477, 4487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4485, 4487), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1070), 'numpy.amin', 'np.amin', (['fxy'], {}), '(fxy)\n', (1065, 1070), True, 'import numpy as np\n'), ((1072, 1084), 'numpy.amax', 'np.amax', (['fxy'], {}), '(fxy)\n', (1079, 1084), True, 'import numpy as np\n')] |
'''
本模块用于数据预处理
This module is used for data preproccessing
'''
import numpy as np
from maysics.utils import e_distances
from matplotlib import pyplot as plt
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False
from io import BytesIO
from lxml import etree
import base64
import math
def _rc(arg):
cov_mat = np.cov(arg)
var_mat = np.diagonal(cov_mat)**0.5
var_mat[var_mat == 0] = 1
for i in range(cov_mat.shape[0]):
cov_mat[i] /= var_mat[i]
cov_mat[:, i] /= var_mat[i]
return cov_mat
def _preview_process(data, value_round):
'''
预览处理
'''
data = np.array(data, dtype=float)
name_list = ['平均值', '中位数', '方差', '标准差', '最大值', '最小值', '偏度', '峰度']
value_list = []
mean_ = data.mean(axis=0)
value_list.append(np.round(mean_, value_round))
value_list.append(np.round(np.median(data, axis=0), value_round))
value_list.append(np.round(data.var(axis=0), value_round))
value_list.append(np.round(data.std(axis=0), value_round))
value_list.append(np.round(data.max(axis=0), value_round))
value_list.append(np.round(data.min(axis=0), value_round))
value_list.append(np.round(((data - mean_)**3).mean(axis=0), value_round))
value_list.append(np.round(((data - mean_)**4).mean(axis=0), value_round))
value_list = np.array(value_list).flatten()
style = '''
<style>
table{
border-collapse: collapse;
}
table, table tr td {
border:1px solid #ccc;
}
table tr td{
padding: 5px 10px;
}
</style>
'''
table = '<h2 style="padding-left:50px; border-top:1px solid #ccc">数值特征</h2>' + style + '<table align="center"><caption></caption>'
for i in range(8):
table += '<tr><td>' + name_list[i] + '</td>' + '<td>%s</td>' * data.shape[1] + '</tr>'
table = '<h1 style="padding-left:50px;">数据信息</h1>' + table % tuple(value_list) + '</table>'
data = np.ascontiguousarray(data.T)
num = data.shape[0]
plt.figure(figsize=(9, 3 * num))
for i in range(num):
q1, q2, q3 = np.percentile(data[i], [25, 50, 75])
plt.scatter(mean_[i], i+1, marker='o', color='white', s=30, zorder=3)
plt.hlines(i+1, q1, q3, color='k', linestyle='-', lw=1)
bx = plt.violinplot(data.tolist(), showextrema=False, vert=False)
plt.title('分布图')
buffer = BytesIO()
plt.savefig(buffer)
plt.close()
plot_data = buffer.getvalue()
imb = base64.b64encode(plot_data)
ims = imb.decode()
imd = 'data:image/png;base64,' + ims
im1 = '<div align="center"><img src="%s"></div>' % imd
im1 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">密度分布</h2>' + im1
cov_mat = _rc(data)
matrix = '<table border="0"><caption></caption>'
for i in range(num):
matrix += '<tr>' + '<td>%s</td>' * num + '</tr>'
matrix = matrix % tuple(np.round(cov_mat.flatten(), value_round)) + '</table>'
plt.figure(figsize=(8, 8))
plt.matshow(cov_mat, fignum=0, cmap='Blues')
plt.colorbar()
plt.title('相关系数图')
buffer = BytesIO()
plt.savefig(buffer)
plt.close()
plot_data = buffer.getvalue()
imb = base64.b64encode(plot_data)
ims = imb.decode()
imd = 'data:image/png;base64,' + ims
im2 = '<div style="display:flex;flex-direction:row;vertical-align:middle;justify-content:center;width:100%;height:80vh"><div style="margin:auto 0;white-space:pre-wrap;max-width:50%">'
im2 = im2 +'相关矩阵:'+ matrix + '</div><img style="object-fit:contain;max-width:45%;max-height:80vh" src="{}"/></div>'.format(imd)
im2 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">相关性</h2>' + im2
plt.figure(figsize=(2.5 * num, 2.5 * num))
for i in range(num * num):
ax = plt.subplot(num, num, i+1)
ax.plot(data[i//num], data[i%num], 'o')
buffer = BytesIO()
plt.savefig(buffer)
plt.close()
plot_data = buffer.getvalue()
imb = base64.b64encode(plot_data)
ims = imb.decode()
imd = "data:image/png;base64," + ims
im3 = '<div align="center"><img src="%s"></div>' % imd
im3 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">散点关系</h2>' + im3
return '<title>数据信息预览</title>' + table + im1 + im2 + im3
def preview_file(filename, data, value_round=3):
'''
生成数据预览报告的html文件
参数
----
filename:字符串类型,文件名
data:二维数组,数据
value_round:整型,数字特征保留的小数点后的位数
Generate preview report with html file
Parameters
----------
filename: str, file name
data: 2-D array, data
value_round: int, the number of digits after the decimal point retained by numeric features
'''
root = _preview_process(data=data, value_round=value_round)
html = etree.HTML(root)
tree = etree.ElementTree(html)
tree.write(filename)
def preview(data, value_round=3):
'''
在jupyter中显示数据预览报告
参数
----
data:二维数组,数据
value_round:整型,数字特征保留的小数点后的位数
Display preview report in jupyter
Parameters
----------
data: 2-D array, data
value_round: int, the number of digits after the decimal point retained by numeric features
'''
root = _preview_process(data=data, value_round=value_round)
from IPython.core.display import display, HTML
display(HTML(root))
def length_pad(seq, maxlen=None, value=0, padding='pre', dtype=float):
'''
填充二维列表,使得每行长度都为maxlen
参数
----
seq:二维列表,需要填充的对象
maxlen:整型,可选,每行的最大长度,默认为原二维列表最大的长度
value:数类型,可选,填充值,默认为0
padding:字符串类型,可选,填充位置,'pre'代表从前面填充,'post'代表从后面填充,默认为'pre'
dtype:可选,输出的元素类型,默认为float
返回
----
二维ndarray
Pad the 2-D list so that every row is 'maxlen' in length
Parameters
----------
seq: 2-D list, objects that need to be padded
maxlen: int, callable, the maximum length of each row, default = the maximum length of the original 2-D list
value: num, callable, padding value, default=0
padding: str, callable, padding location, 'pre' means padding from the front and 'post' from the back, default='pre'
dtype: callable, the element type of the output, default=float
Return
------
2-D ndarray
'''
seq = list(seq)
if not maxlen:
maxlen = 0
for i in seq:
if len(i) > maxlen:
maxlen = len(i)
if padding == 'pre':
for i in range(len(seq)):
if maxlen > len(seq[i]):
seq[i] = [value] * (maxlen - len(seq[i])) + seq[i]
elif maxlen < len(seq[i]):
seq[i] = seq[i][-1 * maxlen:]
elif padding == 'post':
for i in range(len(seq)):
if maxlen > len(seq[i]):
seq[i] += [value] * (maxlen - len(seq[i]))
elif maxlen < len(seq[i]):
seq[i] = seq[i][:maxlen]
return np.array(seq, dtype=dtype)
def sample_pad(data, index=0, padding=None):
'''
对二维数据进行样本填充
先对data中的每个二维数据进行遍历,以各个index列的值作为全集,再对data的每个二维数据进行填充
如:data1 = [[0, 1],
[1, 2],
[2, 3]]
data2 = [[2, 3],
[3, 4],
[4, 5]]
data = (data1, data2)
则得到输出:
output = [array([[0, 1],
[1, 2],
[2, 3],
[3, nan],
[4, nan]]),
array([[0, nan],
[1,nan],
[2, 3],
[3, 4],
[4, 5]])]
data:元组或列表类型,数据
index:整型,作为扩充全集的标准列的索引
padding:填充值,可选,默认为None
Sample filling for 2D data
Values of each index column will be taken as the complete set, then each two-dimensional data of data is padded
e.g. data1 = [[0, 1],
[1, 2],
[2, 3]]
data2 = [[2, 3],
[3, 4],
[4, 5]]
data = (data1, data2)
output = [array([[0, 1],
[1, 2],
[2, 3],
[3, nan],
[4, nan]]),
array([[0, nan],
[1,nan],
[2, 3],
[3, 4],
[4, 5]])]
data: tuple or list, data
index: int, the index of a standard column as an extended complete set
padding: padding value, optional, default=None
'''
time_set = set()
result = []
if not padding:
padding = [np.nan] * (len(data[0][0]) - 1)
else:
padding = list([padding])
for i in range(len(data)):
data_part = np.array(data[i], dtype=np.object)
result.append(data_part)
time_set = time_set | set(data_part[:, index])
for i in range(len(result)):
different_set_list = np.array([list(time_set - set(result[i][:, index]))], dtype=np.object).T
num = len(different_set_list)
padding_new = np.array(padding * num, dtype=np.object).reshape(num, -1)
different_set_list = np.hstack((padding_new[:, :index], different_set_list, padding_new[:, index:]))
result[i] = np.vstack((result[i], different_set_list))
return result
def shuffle(*arg):
'''
打乱一个序列或以相同方法打乱多个序列
返回
----
一个ndarray
Shuffle a sequence or shuffle multiple sequences in the same way
Return
------
a ndarray
'''
state = np.random.get_state()
a_new_list = []
for li in arg:
np.random.set_state(state)
np.random.shuffle(li)
a_new_list.append(li)
return np.array(a_new_list)
def data_split(data, targets, train_size=None, test_size=None, shuffle=True, random_state=None):
'''
分离数据
参数
----
data:数据
targets:指标
train_size:浮点数类型,可选,训练集占总数据量的比,取值范围为(0, 1],默认为0.75
test_size:浮点数类型,可选,测试集占总数据量的比,取值范围为[0, 1),当train_size被定义时,该参数无效
shuffle:布尔类型,可选,True表示打乱数据,False表示不打乱数据,默认为True
random_state:整型,可选,随机种子
返回
----
元组,(数据测试集, 指标测试集, 数据验证集, 指标验证集)
split the data
Parameters
----------
data: data
targets: targets
train_size: float, callable, ratio of training set to total data, value range is (0, 1], default=0.75
test_size: float, callable, ratio of test set to total data, value range is [0, 1)
shuffle: bool, callable, 'True' will shuffle the data, 'False' will not, default = True
random_state: int, callable, random seed
Return
------
tuple, (train_data, train_target, validation_data, validation_target)
'''
data = np.array(data)
targets = np.array(targets)
if not (train_size or test_size):
train_size = 0.75
elif test_size:
train_size = 1 - test_size
if train_size <= 0 or train_size > 1:
raise Exception("'train_size' should be in (0, 1], 'test_size' should be in [0, 1)")
if shuffle:
np.random.seed(random_state)
state = np.random.get_state()
np.random.shuffle(data)
np.random.set_state(state)
np.random.shuffle(targets)
num_of_data = len(data)
train_data = data[:int(num_of_data * train_size)]
train_target = targets[:int(num_of_data * train_size)]
validation_data = data[int(num_of_data * train_size):]
validation_target = targets[int(num_of_data * train_size):]
return train_data, train_target, validation_data, validation_target
def kfold(data, targets, n, k=5):
'''
参数
----
data:数据
targets:指标
n:整型,表示将第n折作为验证集,从0开始
k:整型,可选,k折验证的折叠数,默认k=5
返回
----
元组,(数据测试集, 指标测试集, 数据验证集, 指标验证集)
Parameters
----------
data: data
targets: targets
n: int, take the nth part as validation set, starting from 0
k: int, callable, the number of k-fold, default = 5
Return
------
tuple, (train_data, train_target, validation_data, validation_target)
'''
data = np.array(data)
targets = np.array(targets)
num_validation_samples = len(data) // k
validation_data = data[num_validation_samples * n:
num_validation_samples * (n + 1)]
validation_targets = targets[num_validation_samples * n:
num_validation_samples * (n + 1)]
train_data = np.concatenate([data[: num_validation_samples * n],
data[num_validation_samples * (n + 1):]])
train_targets = np.concatenate([targets[: num_validation_samples * n],
targets[num_validation_samples * (n + 1):]])
return train_data, train_targets, validation_data, validation_targets
def dataloader(data, targets, choose_rate=0.3, shuffle=True, random_state=None):
'''
数据随机生成器
参数
----
data:数据
targets:指标
choose_rate:浮点数类型,可选,生成率,即一次生成数据量在原数据量的占比,范围为[0, 1],默认为0.3
shuffle:布尔类型,可选,True表示打乱数据,False表示不打乱数据,默认为True
random_state:整型,可选,随机种子
返回
----
生成器
Data Random Generator
Parameters
----------
data: data
targets: targets
choose_rate: float, callable, generation rate (the proportion of data generated at one time in the original data) whose range is [0, 1], default=0.3
shuffle: bool, callable, 'True' will shuffle the data, 'False' will not, default = True
random_state: int, callable, random seed
Return
------
generator
'''
data = np.array(data)
targets = np.array(targets)
if shuffle:
np.random.seed(random_state)
state = np.random.get_state()
np.random.shuffle(data)
np.random.set_state(state)
np.random.shuffle(targets)
num = len(data)
choose_rate = int(num * choose_rate)
times = int(math.ceil(num / choose_rate))
for i in range(times):
loc_1 = i * choose_rate
loc_2 = (i + 1) * choose_rate
yield data[loc_1: loc_2], targets[loc_1: loc_2]
def standard(data, mean=True, var=True, index=None):
'''
标准化数据
z = (x - u) / s
z:新数据; x:原数据; u:均值; s:方差
如果某一列数据完全相同(即方差s=0),则该列数据全部归零
参数
----
data:2-D的ndarray数据
mean:布尔类型或ndarray,可选,布尔类型决定是否将均值调整为0,ndarray用于设定不同的均值
var:布尔类型或ndarray,可选,是否将方差调整为1,ndarray用于设定不同的方差
index:列表类型,可选,需要进行标准化的列的索引,默认为全部
返回
----
tuple
Standardize data
z = (x - u) / s
z: new data; x: origin data; u: mean value; s: variance
if data in one column are the same(s=0), data in this column will be turned to 0
Parameters
----------
data: 2-D ndarray
mean: bool or ndarray, callable, bool decides if adjust the mean value to 0, ndarray is used to set different means
var: bool or ndarray, callable, bool decides if adjust the variance to 0, ndarray is used to set different variances
index: list, callable, index of columns need to be standardized, defalut to all
Return
------
2-D ndarray
'''
data=np.array(data, dtype=float)
if index:
if mean is True:
mean = data[:, index].mean(axis=0)
elif mean is None or mean is False:
mean = np.zeros(len(index))
data[:, index] -= mean
if not var is None and not var is False:
if var is True:
var = data[:, index].std(axis=0)
else:
var = np.array(var)
std_zero_indices = np.nonzero(var == 0)
std = var.copy()
std[std==0] = 1.0
data[:, index] /= std
if list(std_zero_indices[0]):
for i in std_zero_indices[0]:
data[:, index][:, i] *= 0
else:
std = 0
else:
if mean is True:
mean = data.mean(axis=0)
elif mean is None or mean is False:
mean = np.zeros(data.shape[1])
else:
mean = np.array(mean)
data -= mean
if not var is None and not var is False:
if var is True:
var = data.std(axis=0)
else:
var = np.array(var)
std_zero_indices = np.nonzero(var == 0)
std = var.copy()
std[std==0] = 1.0
data /= std
if list(std_zero_indices[0]):
for i in std_zero_indices[0]:
data[:, i] *= 0
else:
std = 0
return data, mean, std
def minmax(data, feature_range=(0, 1), min_max=None, index=None):
'''
归一化数据
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_new = X_std * (feature_range[1] - feature_range[0]) + feature_range[0]
参数
----
data:2-D的ndarray数据
feature_range:元组类型,可选,需要转换的范围,默认为(0, 1)
min_max:元组类型,可选,用于设定最大最小值
index:列表类型,可选,需要进行标准化的列的索引,默认为全部
返回
----
元组,(归一化后的数据, (最小值,最大值))
Normalize data
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_new = X_std * (feature_range[1] - feature_range[0]) + feature_range[0]
Parameters
----------
data: 2-D的ndarray数据
feature_range: tuple, callabel, final range of transformed data
min_max: tuple, callable, set min and max values
index: list, callable, index of columns need to be standardized, defalut to all
Return
------
tuple, (Normalized data, (miniment, maximent))
'''
data=np.array(data, dtype=float)
if index:
if not min_max:
min_max = (data[:, index].min(axis=0), data[:, index].max(axis=0))
length = min_max[1] - min_max[0]
data[:, index] = (data[:, index] - min_max[0]) / length
data[:, index] = data[:, index] * (feature_range[1] - feature_range[0]) + feature_range[0]
else:
if not min_max:
min_max = (data.min(axis=0), data.max(axis=0))
length = min_max[1] - min_max[0]
data = (data - min_max[0]) / length
data = data * (feature_range[1] - feature_range[0]) + feature_range[0]
return data, min_max
def normalizer(data, index=None):
'''
使每个数据的模为1
参数
----
data:2-D的ndarray数据
index:列表形式,可选,需要进行标准化的列的索引,默认为全部
返回
----
2-D ndarray
Making the moduli of data equal 1
Parameters
----------
data: 2-D的ndarray数据
index: list, callable, index of columns need to be standardized, defalut to all
Return
------
2-D ndarray
'''
data = np.array(data, dtype=float)
if index:
distance_list = e_distances(data[:, index])
distance_list[distance_list == 0] = 1
data[:, index] /= np.array([distance_list]).T
else:
distance_list = e_distances(data)
distance_list[distance_list == 0] = 1
data /= np.array([distance_list]).T
return data
def pca(data, n=None, eig_vector=None):
'''
主成分分析
参数
----
data:二维数组,数据
n:整型或浮点数类型,可选,当n >= 1时,表示降至n维,当0< n < 1时,表示降维至累计方差贡献率 >= n,默认不降维
eig_vector:元组类型,可选,用于设定不同的特征向量,当设置该参数时,累计方差贡献率无意义,默认为None
返回
----
元组,(转换后的数据, (累计方差贡献率, 各主成分方差, 各主成分方差贡献率), 特征向量)
Principal Component Analysis
Parameters
----------
data:2-D array, data
n: int or float, callable, when n > = 1, it means to reduce the dimension to n; when 0 < n < 1, it means to reduce the dimension to cumulative variance ratio > = n, and it is not reduced by default
eig_value_vector: tuple, callable, set different eigenvectors, when this parameter is set, the cumulative variance ratio is meaningless, default=None
Return
------
tuple, (transformed data, (cumulative variance ratio, variance, variance ratio), eigenvectors)
'''
data = np.array(data, dtype=float)
if eig_vector is None or eig_vector is False:
cov_mat = np.cov(data.T)
eig_value, eig_vector = np.linalg.eig(cov_mat)
sort_index = np.flipud(np.argsort(eig_value))
eig_value = eig_value[sort_index]
eig_vector = eig_vector[:, sort_index]
eig_ratio = eig_value / eig_value.sum()
contri = 0
if not n is None and not n is False:
if n >= 1:
n = int(n)
contri = eig_ratio[:n].sum()
elif n < 1:
for i in range(eig_value.shape[0]):
contri += eig_ratio[i]
if contri >= n:
n = i + 1
break
eig_value = eig_value[:n]
eig_ratio = eig_ratio[:n]
eig_vector = eig_vector[:, :n]
else:
contri = 1
else:
contri = None
eig_value = None
eig_ratio = None
data = np.dot(data, eig_vector)
return data, (contri, eig_value, eig_ratio), eig_vector
class RC():
'''
相关系数
参数
----
*arg:列表类型
属性
----
rc_mat:相关系数矩阵
correlation coefficient
Parameter
---------
*arg: list
Attribute
---------
rc_mat: correlation coefficient matrix
'''
def __init__(self, *arg):
arg = np.array(arg, dtype=float)
if len(arg.shape) != 2:
raise Exception("Input list should be 1-D.")
else:
self.rc_mat = _rc(arg)
def __img_process(self, index, cmap):
plt.matshow(self.rc_mat, cmap=cmap)
plt.colorbar()
if index:
n_list = range(len(index))
plt.xticks(n_list, index)
plt.yticks(n_list, index)
def show(self, index=None, cmap='Blues'):
'''
作图并显示
参数
----
index:列表形式,可选,各数组名称
cmap:字符串形式,可选,颜色板,默认为'Blues'
Display the image
Parameters
----------
index: list, callable, names of each array
cmap: str, callable, color board, default='Blues'
'''
self.__img_process(index=index, cmap=cmap)
plt.show()
def savefig(self, filename, index=None, cmap='Blues'):
'''
作图并保存
参数
----
filename:字符串形式,文件名
index:列表形式,可选,各数组名称
cmap:字符串形式,可选,颜色板,默认为'Blues'
Save the image
Parameters
----------
filename: str, file name
index: list, callable, names of each array
cmap: str, callable, color board, default='Blues'
'''
self.__img_process(index=index, cmap=cmap)
plt.savefig(filename) | [
"numpy.random.get_state",
"numpy.random.set_state",
"numpy.hstack",
"base64.b64encode",
"io.BytesIO",
"numpy.ascontiguousarray",
"maysics.utils.e_distances",
"numpy.array",
"numpy.argsort",
"lxml.etree.HTML",
"numpy.cov",
"lxml.etree.ElementTree",
"IPython.core.display.HTML",
"matplotlib.p... | [((350, 361), 'numpy.cov', 'np.cov', (['arg'], {}), '(arg)\n', (356, 361), True, 'import numpy as np\n'), ((647, 674), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (655, 674), True, 'import numpy as np\n'), ((1968, 1996), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data.T'], {}), '(data.T)\n', (1988, 1996), True, 'import numpy as np\n'), ((2025, 2057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 3 * num)'}), '(figsize=(9, 3 * num))\n', (2035, 2057), True, 'from matplotlib import pyplot as plt\n'), ((2357, 2373), 'matplotlib.pyplot.title', 'plt.title', (['"""分布图"""'], {}), "('分布图')\n", (2366, 2373), True, 'from matplotlib import pyplot as plt\n'), ((2392, 2401), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2399, 2401), False, 'from io import BytesIO\n'), ((2406, 2425), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {}), '(buffer)\n', (2417, 2425), True, 'from matplotlib import pyplot as plt\n'), ((2430, 2441), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2439, 2441), True, 'from matplotlib import pyplot as plt\n'), ((2486, 2513), 'base64.b64encode', 'base64.b64encode', (['plot_data'], {}), '(plot_data)\n', (2502, 2513), False, 'import base64\n'), ((2997, 3023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3007, 3023), True, 'from matplotlib import pyplot as plt\n'), ((3028, 3072), 'matplotlib.pyplot.matshow', 'plt.matshow', (['cov_mat'], {'fignum': '(0)', 'cmap': '"""Blues"""'}), "(cov_mat, fignum=0, cmap='Blues')\n", (3039, 3072), True, 'from matplotlib import pyplot as plt\n'), ((3077, 3091), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3089, 3091), True, 'from matplotlib import pyplot as plt\n'), ((3096, 3114), 'matplotlib.pyplot.title', 'plt.title', (['"""相关系数图"""'], {}), "('相关系数图')\n", (3105, 3114), True, 'from matplotlib import pyplot as plt\n'), ((3133, 3142), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3140, 3142), False, 'from io import BytesIO\n'), ((3147, 3166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {}), '(buffer)\n', (3158, 3166), True, 'from matplotlib import pyplot as plt\n'), ((3171, 3182), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3180, 3182), True, 'from matplotlib import pyplot as plt\n'), ((3227, 3254), 'base64.b64encode', 'base64.b64encode', (['plot_data'], {}), '(plot_data)\n', (3243, 3254), False, 'import base64\n'), ((3746, 3788), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5 * num, 2.5 * num)'}), '(figsize=(2.5 * num, 2.5 * num))\n', (3756, 3788), True, 'from matplotlib import pyplot as plt\n'), ((3926, 3935), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3933, 3935), False, 'from io import BytesIO\n'), ((3940, 3959), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {}), '(buffer)\n', (3951, 3959), True, 'from matplotlib import pyplot as plt\n'), ((3964, 3975), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3973, 3975), True, 'from matplotlib import pyplot as plt\n'), ((4020, 4047), 'base64.b64encode', 'base64.b64encode', (['plot_data'], {}), '(plot_data)\n', (4036, 4047), False, 'import base64\n'), ((4827, 4843), 'lxml.etree.HTML', 'etree.HTML', (['root'], {}), '(root)\n', (4837, 4843), False, 'from lxml import etree\n'), ((4855, 4878), 'lxml.etree.ElementTree', 'etree.ElementTree', (['html'], {}), '(html)\n', (4872, 4878), False, 'from lxml import etree\n'), ((6955, 6981), 'numpy.array', 'np.array', (['seq'], {'dtype': 'dtype'}), '(seq, dtype=dtype)\n', (6963, 6981), True, 'import numpy as np\n'), ((9637, 9658), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (9656, 9658), True, 'import numpy as np\n'), ((9804, 9824), 'numpy.array', 'np.array', (['a_new_list'], {}), '(a_new_list)\n', (9812, 9824), True, 'import numpy as np\n'), ((10799, 10813), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (10807, 10813), True, 'import numpy as np\n'), ((10828, 10845), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (10836, 10845), True, 'import numpy as np\n'), ((12167, 12181), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (12175, 12181), True, 'import numpy as np\n'), ((12196, 12213), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (12204, 12213), True, 'import numpy as np\n'), ((12529, 12626), 'numpy.concatenate', 'np.concatenate', (['[data[:num_validation_samples * n], data[num_validation_samples * (n + 1):]]'], {}), '([data[:num_validation_samples * n], data[\n num_validation_samples * (n + 1):]])\n', (12543, 12626), True, 'import numpy as np\n'), ((12676, 12779), 'numpy.concatenate', 'np.concatenate', (['[targets[:num_validation_samples * n], targets[num_validation_samples * (n +\n 1):]]'], {}), '([targets[:num_validation_samples * n], targets[\n num_validation_samples * (n + 1):]])\n', (12690, 12779), True, 'import numpy as np\n'), ((13671, 13685), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (13679, 13685), True, 'import numpy as np\n'), ((13700, 13717), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (13708, 13717), True, 'import numpy as np\n'), ((15202, 15229), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (15210, 15229), True, 'import numpy as np\n'), ((17639, 17666), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (17647, 17666), True, 'import numpy as np\n'), ((18714, 18741), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (18722, 18741), True, 'import numpy as np\n'), ((19987, 20014), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (19995, 20014), True, 'import numpy as np\n'), ((21017, 21041), 'numpy.dot', 'np.dot', (['data', 'eig_vector'], {}), '(data, eig_vector)\n', (21023, 21041), True, 'import numpy as np\n'), ((376, 396), 'numpy.diagonal', 'np.diagonal', (['cov_mat'], {}), '(cov_mat)\n', (387, 396), True, 'import numpy as np\n'), ((822, 850), 'numpy.round', 'np.round', (['mean_', 'value_round'], {}), '(mean_, value_round)\n', (830, 850), True, 'import numpy as np\n'), ((2104, 2140), 'numpy.percentile', 'np.percentile', (['data[i]', '[25, 50, 75]'], {}), '(data[i], [25, 50, 75])\n', (2117, 2140), True, 'import numpy as np\n'), ((2149, 2220), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean_[i]', '(i + 1)'], {'marker': '"""o"""', 'color': '"""white"""', 's': '(30)', 'zorder': '(3)'}), "(mean_[i], i + 1, marker='o', color='white', s=30, zorder=3)\n", (2160, 2220), True, 'from matplotlib import pyplot as plt\n'), ((2227, 2284), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(i + 1)', 'q1', 'q3'], {'color': '"""k"""', 'linestyle': '"""-"""', 'lw': '(1)'}), "(i + 1, q1, q3, color='k', linestyle='-', lw=1)\n", (2237, 2284), True, 'from matplotlib import pyplot as plt\n'), ((3833, 3861), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num', 'num', '(i + 1)'], {}), '(num, num, i + 1)\n', (3844, 3861), True, 'from matplotlib import pyplot as plt\n'), ((5387, 5397), 'IPython.core.display.HTML', 'HTML', (['root'], {}), '(root)\n', (5391, 5397), False, 'from IPython.core.display import display, HTML\n'), ((8834, 8868), 'numpy.array', 'np.array', (['data[i]'], {'dtype': 'np.object'}), '(data[i], dtype=np.object)\n', (8842, 8868), True, 'import numpy as np\n'), ((9244, 9323), 'numpy.hstack', 'np.hstack', (['(padding_new[:, :index], different_set_list, padding_new[:, index:])'], {}), '((padding_new[:, :index], different_set_list, padding_new[:, index:]))\n', (9253, 9323), True, 'import numpy as np\n'), ((9344, 9386), 'numpy.vstack', 'np.vstack', (['(result[i], different_set_list)'], {}), '((result[i], different_set_list))\n', (9353, 9386), True, 'import numpy as np\n'), ((9706, 9732), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (9725, 9732), True, 'import numpy as np\n'), ((9741, 9762), 'numpy.random.shuffle', 'np.random.shuffle', (['li'], {}), '(li)\n', (9758, 9762), True, 'import numpy as np\n'), ((11134, 11162), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (11148, 11162), True, 'import numpy as np\n'), ((11179, 11200), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (11198, 11200), True, 'import numpy as np\n'), ((11209, 11232), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (11226, 11232), True, 'import numpy as np\n'), ((11241, 11267), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (11260, 11267), True, 'import numpy as np\n'), ((11276, 11302), 'numpy.random.shuffle', 'np.random.shuffle', (['targets'], {}), '(targets)\n', (11293, 11302), True, 'import numpy as np\n'), ((13747, 13775), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (13761, 13775), True, 'import numpy as np\n'), ((13792, 13813), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (13811, 13813), True, 'import numpy as np\n'), ((13822, 13845), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (13839, 13845), True, 'import numpy as np\n'), ((13854, 13880), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (13873, 13880), True, 'import numpy as np\n'), ((13889, 13915), 'numpy.random.shuffle', 'np.random.shuffle', (['targets'], {}), '(targets)\n', (13906, 13915), True, 'import numpy as np\n'), ((13993, 14021), 'math.ceil', 'math.ceil', (['(num / choose_rate)'], {}), '(num / choose_rate)\n', (14002, 14021), False, 'import math\n'), ((18785, 18812), 'maysics.utils.e_distances', 'e_distances', (['data[:, index]'], {}), '(data[:, index])\n', (18796, 18812), False, 'from maysics.utils import e_distances\n'), ((18952, 18969), 'maysics.utils.e_distances', 'e_distances', (['data'], {}), '(data)\n', (18963, 18969), False, 'from maysics.utils import e_distances\n'), ((20088, 20102), 'numpy.cov', 'np.cov', (['data.T'], {}), '(data.T)\n', (20094, 20102), True, 'import numpy as np\n'), ((20135, 20157), 'numpy.linalg.eig', 'np.linalg.eig', (['cov_mat'], {}), '(cov_mat)\n', (20148, 20157), True, 'import numpy as np\n'), ((21426, 21452), 'numpy.array', 'np.array', (['arg'], {'dtype': 'float'}), '(arg, dtype=float)\n', (21434, 21452), True, 'import numpy as np\n'), ((21651, 21686), 'matplotlib.pyplot.matshow', 'plt.matshow', (['self.rc_mat'], {'cmap': 'cmap'}), '(self.rc_mat, cmap=cmap)\n', (21662, 21686), True, 'from matplotlib import pyplot as plt\n'), ((21695, 21709), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (21707, 21709), True, 'from matplotlib import pyplot as plt\n'), ((22294, 22304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22302, 22304), True, 'from matplotlib import pyplot as plt\n'), ((22826, 22847), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (22837, 22847), True, 'from matplotlib import pyplot as plt\n'), ((883, 906), 'numpy.median', 'np.median', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (892, 906), True, 'import numpy as np\n'), ((1349, 1369), 'numpy.array', 'np.array', (['value_list'], {}), '(value_list)\n', (1357, 1369), True, 'import numpy as np\n'), ((15656, 15676), 'numpy.nonzero', 'np.nonzero', (['(var == 0)'], {}), '(var == 0)\n', (15666, 15676), True, 'import numpy as np\n'), ((16381, 16401), 'numpy.nonzero', 'np.nonzero', (['(var == 0)'], {}), '(var == 0)\n', (16391, 16401), True, 'import numpy as np\n'), ((18885, 18910), 'numpy.array', 'np.array', (['[distance_list]'], {}), '([distance_list])\n', (18893, 18910), True, 'import numpy as np\n'), ((19032, 19057), 'numpy.array', 'np.array', (['[distance_list]'], {}), '([distance_list])\n', (19040, 19057), True, 'import numpy as np\n'), ((20198, 20219), 'numpy.argsort', 'np.argsort', (['eig_value'], {}), '(eig_value)\n', (20208, 20219), True, 'import numpy as np\n'), ((21779, 21804), 'matplotlib.pyplot.xticks', 'plt.xticks', (['n_list', 'index'], {}), '(n_list, index)\n', (21789, 21804), True, 'from matplotlib import pyplot as plt\n'), ((21817, 21842), 'matplotlib.pyplot.yticks', 'plt.yticks', (['n_list', 'index'], {}), '(n_list, index)\n', (21827, 21842), True, 'from matplotlib import pyplot as plt\n'), ((9157, 9197), 'numpy.array', 'np.array', (['(padding * num)'], {'dtype': 'np.object'}), '(padding * num, dtype=np.object)\n', (9165, 9197), True, 'import numpy as np\n'), ((15611, 15624), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (15619, 15624), True, 'import numpy as np\n'), ((16078, 16101), 'numpy.zeros', 'np.zeros', (['data.shape[1]'], {}), '(data.shape[1])\n', (16086, 16101), True, 'import numpy as np\n'), ((16135, 16149), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (16143, 16149), True, 'import numpy as np\n'), ((16336, 16349), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (16344, 16349), True, 'import numpy as np\n')] |
import requests
from bs4 import BeautifulSoup
from time import sleep
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
def parse(url):
print('Parsing..' + url)
return 'Parsed..' + url
def pull(category_url):
total_pages = 0
total_links = []
try:
print('Processing...' + category_url)
r = requests.get(category_url, headers=headers, timeout=5)
if r.status_code == 200:
html = r.text.strip()
soup = BeautifulSoup(html, 'lxml')
# Find total pages
pagination_section = soup.select('.pagination li > a')
if pagination_section:
# -2 because the last is NEXT button
total_pages = int(pagination_section[len(pagination_section) - 2].text)
links = soup.select('.products .link')
# results = [parse(l['href']) for l in links]
for l in links:
total_links.append(l['href'])
for x in range(2, total_pages + 1):
sleep(2)
cat_url = 'https://www.daraz.pk/mens-smart-watches/?page={0}'.format(x)
print('Processing...' + cat_url)
r = requests.get(category_url, headers=headers, timeout=5)
if r.status_code == 200:
links = soup.select('.products .link')
[total_links.append(l['href']) for l in links]
except requests.ConnectionError as e:
print("OOPS!! Connection Error. Make sure you are connected to Internet. Technical Details given below.\n")
print(str(e))
except requests.Timeout as e:
print("OOPS!! Timeout Error")
print(str(e))
except requests.RequestException as e:
print("OOPS!! General Error")
print(str(e))
except KeyboardInterrupt:
print("Someone closed the program")
finally:
# Save links into file
if len(total_links) > 0:
with open('links.txt', 'a+', encoding='utf=8') as f:
f.write('\n'.join(total_links))
if __name__ == '__main__':
cat_url = 'https://www.daraz.pk/mens-smart-watches/'
pull(cat_url)
with open('links.txt',encoding='utf-8') as f:
lines = f.readlines()
for l in lines:
parse(l)
| [
"bs4.BeautifulSoup",
"time.sleep",
"requests.get"
] | [((434, 488), 'requests.get', 'requests.get', (['category_url'], {'headers': 'headers', 'timeout': '(5)'}), '(category_url, headers=headers, timeout=5)\n', (446, 488), False, 'import requests\n'), ((575, 602), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (588, 602), False, 'from bs4 import BeautifulSoup\n'), ((1125, 1133), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1130, 1133), False, 'from time import sleep\n'), ((1291, 1345), 'requests.get', 'requests.get', (['category_url'], {'headers': 'headers', 'timeout': '(5)'}), '(category_url, headers=headers, timeout=5)\n', (1303, 1345), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
import os
import sys
import re
import settings
from auth.connector import TrelloConnector
from stats import summary
from stats.trelloboardconfiguration import TrelloBoardConfiguration
def extract_stats(configuration_file_path):
"""
Extract stats for a given configuration file that defines a trello board and other settings.
:param configuration_file_path: file path where the configuration file is.
"""
configuration = TrelloBoardConfiguration.load_from_file(configuration_file_path)
summary.make(trello_connector, configuration)
def file_is_configuration_file(_file_name):
return re.match(r"^[^\.]+\.conf\.txt", _file_name)
if __name__ == "__main__":
api_key = settings.TRELLO_API_KEY
api_secret = settings.TRELLO_API_SECRET
token = settings.TRELLO_TOKEN
token_secret = settings.TRELLO_TOKEN_SECRET
trello_connector = TrelloConnector(api_key, api_secret, token, token_secret)
if len(sys.argv) < 2:
raise ValueError(u"Error. Use python stats_extractor.py <configuration_file_path>")
# Configuration file path
configuration_path = sys.argv[1]
# If configuration path is a file, extract stats of the board written in this file
if os.path.isfile(configuration_path):
extract_stats(configuration_path)
# Otherwise, if configuration path is a directory, loop through directory files and extract stats
# for each of these files
elif os.path.isdir(configuration_path):
for file_name in os.listdir(configuration_path):
if file_is_configuration_file(file_name):
print(u"Processing {0}".format(file_name))
extract_stats(u"{0}/{1}".format(configuration_path, file_name))
| [
"os.listdir",
"auth.connector.TrelloConnector",
"stats.trelloboardconfiguration.TrelloBoardConfiguration.load_from_file",
"re.match",
"os.path.isfile",
"os.path.isdir",
"stats.summary.make"
] | [((468, 532), 'stats.trelloboardconfiguration.TrelloBoardConfiguration.load_from_file', 'TrelloBoardConfiguration.load_from_file', (['configuration_file_path'], {}), '(configuration_file_path)\n', (507, 532), False, 'from stats.trelloboardconfiguration import TrelloBoardConfiguration\n'), ((537, 582), 'stats.summary.make', 'summary.make', (['trello_connector', 'configuration'], {}), '(trello_connector, configuration)\n', (549, 582), False, 'from stats import summary\n'), ((640, 685), 're.match', 're.match', (['"""^[^\\\\.]+\\\\.conf\\\\.txt"""', '_file_name'], {}), "('^[^\\\\.]+\\\\.conf\\\\.txt', _file_name)\n", (648, 685), False, 'import re\n'), ((901, 958), 'auth.connector.TrelloConnector', 'TrelloConnector', (['api_key', 'api_secret', 'token', 'token_secret'], {}), '(api_key, api_secret, token, token_secret)\n', (916, 958), False, 'from auth.connector import TrelloConnector\n'), ((1241, 1275), 'os.path.isfile', 'os.path.isfile', (['configuration_path'], {}), '(configuration_path)\n', (1255, 1275), False, 'import os\n'), ((1461, 1494), 'os.path.isdir', 'os.path.isdir', (['configuration_path'], {}), '(configuration_path)\n', (1474, 1494), False, 'import os\n'), ((1521, 1551), 'os.listdir', 'os.listdir', (['configuration_path'], {}), '(configuration_path)\n', (1531, 1551), False, 'import os\n')] |
"""Convenience file to help start the game when the repo is cloned from git rather than installed via pip
This was required as we needed to run the script from the same level as the housie/ package in order for the imports
to work correctly.
"""
from housie.game import display_main_menu
display_main_menu()
| [
"housie.game.display_main_menu"
] | [((290, 309), 'housie.game.display_main_menu', 'display_main_menu', ([], {}), '()\n', (307, 309), False, 'from housie.game import display_main_menu\n')] |
import json
import sys
import urllib.parse
import urllib.request
import os
import zipfile
import io
import csv
import re
from html.parser import HTMLParser
code_list = {}
with open('data/codepoints.csv') as f:
reader = csv.reader(f)
for row in reader:
d1,d2,d3 = row[0].split('-')
d1 = int(d1)
d2 = int(d2)
d3 = int(d3)
code_list['%d-%02d-%02d'%(d1,d2,d3)] = chr(int(row[1], 16))
def get_aozora_urls():
aozora_csv_url = 'https://www.aozora.gr.jp/index_pages/list_person_all_extended_utf8.zip'
xhtml_urls = []
html = urllib.request.urlopen(aozora_csv_url)
with zipfile.ZipFile(io.BytesIO(html.read())) as myzip:
with myzip.open('list_person_all_extended_utf8.csv') as myfile:
reader = csv.reader(io.TextIOWrapper(myfile))
idx = -1
for row in reader:
if idx < 0:
idx = [i for i, x in enumerate(row) if 'URL' in x]
idx = [i for i in idx if 'HTML' in row[i]]
if len(idx) == 0:
exit()
idx = idx[0]
continue
if row[idx].startswith('https://www.aozora.gr.jp/cards/'):
xhtml_urls.append(row[idx])
return xhtml_urls
class MyHTMLParser(HTMLParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main = False
self.count = 0
self.startpos = (-1,-1)
self.endpos = (-1,-1)
def handle_starttag(self, tag, attrs):
if tag == 'div':
if self.main:
self.count += 1
elif ('class', 'main_text') in attrs:
self.main = True
self.startpos = self.getpos()
def handle_endtag(self, tag):
if tag == 'div':
if self.main:
if self.count == 0:
self.endpos = self.getpos()
else:
self.count -= 1
def get_contents(url):
html = urllib.request.urlopen(url)
contents = html.read().decode('cp932')
parser = MyHTMLParser()
parser.feed(contents)
maintext = []
for lineno, line in enumerate(contents.splitlines()):
if parser.startpos[0] == lineno + 1:
maintext.append(line[parser.startpos[1]:])
elif parser.startpos[0] < lineno + 1 <= parser.endpos[0]:
if parser.endpos[0] == lineno + 1:
if parser.endpos[1] == 0:
pass
else:
maintext.append(line[:parser.endpos[1]])
else:
maintext.append(line)
maintext = '\n'.join(maintext)
maintext = re.sub(r'<ruby><rb>(.*?)</rb>.*?</ruby>', r'\1', maintext)
m = True
while m:
m = re.search(r'<img .*?/(\d-\d\d-\d\d)\.png.*?>', maintext)
if m:
maintext = maintext[:m.start()] + code_list[m.group(1)] + maintext[m.end():]
maintext = re.sub(r'<span class="notes">.*?</span>', r'', maintext)
maintext = re.sub(r'<[^>]*?>', r'', maintext)
return maintext
if __name__ == '__main__':
urls = get_aozora_urls()
for u in urls:
print(u)
print(get_contents(u)) | [
"re.sub",
"io.TextIOWrapper",
"csv.reader",
"re.search"
] | [((224, 237), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (234, 237), False, 'import csv\n'), ((2722, 2779), 're.sub', 're.sub', (['"""<ruby><rb>(.*?)</rb>.*?</ruby>"""', '"""\\\\1"""', 'maintext'], {}), "('<ruby><rb>(.*?)</rb>.*?</ruby>', '\\\\1', maintext)\n", (2728, 2779), False, 'import re\n'), ((2994, 3048), 're.sub', 're.sub', (['"""<span class="notes">.*?</span>"""', '""""""', 'maintext'], {}), '(\'<span class="notes">.*?</span>\', \'\', maintext)\n', (3000, 3048), False, 'import re\n'), ((3066, 3098), 're.sub', 're.sub', (['"""<[^>]*?>"""', '""""""', 'maintext'], {}), "('<[^>]*?>', '', maintext)\n", (3072, 3098), False, 'import re\n'), ((2819, 2880), 're.search', 're.search', (['"""<img .*?/(\\\\d-\\\\d\\\\d-\\\\d\\\\d)\\\\.png.*?>"""', 'maintext'], {}), "('<img .*?/(\\\\d-\\\\d\\\\d-\\\\d\\\\d)\\\\.png.*?>', maintext)\n", (2828, 2880), False, 'import re\n'), ((782, 806), 'io.TextIOWrapper', 'io.TextIOWrapper', (['myfile'], {}), '(myfile)\n', (798, 806), False, 'import io\n')] |
#!/usr/bin/env python
"""
setup.py file for SWIG Interface of Ext
"""
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from os import walk
import numpy
import wget
from setuptools import Extension
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext
try:
# Obtain the numpy include directory. This logic works across numpy versions.
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
def readme():
with open('./README.md') as f:
return f.read()
def download_numpy_interface(path):
"""
Downloads numpy.i
:return: None
"""
print("Download Numpy SWIG Interface")
np_version = re.compile(r'(?P<MAJOR>[0-9]+)\.'
'(?P<MINOR>[0-9]+)') \
.search(numpy.__version__)
np_version_string = np_version.group()
np_version_info = {key: int(value)
for key, value in np_version.groupdict().items()}
np_file_name = 'numpy.i'
np_file_url = 'https://raw.githubusercontent.com/numpy/numpy/maintenance/' + \
np_version_string + '.x/tools/swig/' + np_file_name
if np_version_info['MAJOR'] == 1 and np_version_info['MINOR'] < 9:
np_file_url = np_file_url.replace('tools', 'doc')
wget.download(np_file_url, path)
return
# Download numpy.i if needed
if not os.path.exists('./EggNetExtension/numpy.i'):
print('Downloading numpy.i')
project_dir = os.path.dirname(os.path.abspath(__file__))
download_numpy_interface(path='./EggNetExtension/')
source_files = ['./EggNetExtension/NNExtension.i', './EggNetExtension/cconv.c',
'./EggNetExtension/cpool.c', './EggNetExtension/crelu.c',
'./EggNetExtension/cmatmul.c', './EggNetExtension/chelper.c']
print("************************ SOURCE FILES *************************")
print(source_files)
print("************************ SOURCE FILES *************************")
include_dirs = ['./EggNetExtension/', numpy_include]
# Simple Platform Check (not entirely accurate because here should the compiler be checked)
# ToDo: Should be done better for example via CMake -> https://www.benjack.io/2017/06/12/python-cpp-tests.html
if platform.system() == 'Linux':
extra_args = ['-std=gnu99']
elif platform.system() == 'Darwin':
extra_args = ['--verbose', '-Rpass=loop-vectorize', '-Rpass-analysis=loop-vectorize', '-ffast-math']
elif platform.system() == 'Windows':
# extra_args = ['/Qrestrict', '/W3']
extra_args = []
else:
raise RuntimeError('Operating System not supported?')
extra_link_args = []
NN_ext_module = Extension('EggNetExtension._EggNetExtension',
sources=source_files,
include_dirs=include_dirs,
swig_opts=['-py3'],
extra_compile_args=extra_args,
extra_link_args=extra_link_args,
depends=['numpy'],
optional=False)
setup(name='EggNetExtension',
version='1.0',
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
description="""NN calculation library for python""",
url='https://github.com/marbleton/FPGA_MNIST',
packages=['EggNetExtension'],
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt', '*.rst', '*.i', '*.c', '*.h'],
},
ext_modules=[NN_ext_module],
install_requires=['numpy', 'wget', 'idx2numpy'],
)
| [
"numpy.get_numpy_include",
"wget.download",
"os.path.exists",
"re.compile",
"setuptools.setup",
"setuptools.Extension",
"platform.system",
"numpy.get_include",
"os.path.abspath"
] | [((2713, 2939), 'setuptools.Extension', 'Extension', (['"""EggNetExtension._EggNetExtension"""'], {'sources': 'source_files', 'include_dirs': 'include_dirs', 'swig_opts': "['-py3']", 'extra_compile_args': 'extra_args', 'extra_link_args': 'extra_link_args', 'depends': "['numpy']", 'optional': '(False)'}), "('EggNetExtension._EggNetExtension', sources=source_files,\n include_dirs=include_dirs, swig_opts=['-py3'], extra_compile_args=\n extra_args, extra_link_args=extra_link_args, depends=['numpy'],\n optional=False)\n", (2722, 2939), False, 'from setuptools import Extension\n'), ((3110, 3498), 'setuptools.setup', 'setup', ([], {'name': '"""EggNetExtension"""', 'version': '"""1.0"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'description': '"""NN calculation library for python"""', 'url': '"""https://github.com/marbleton/FPGA_MNIST"""', 'packages': "['EggNetExtension']", 'package_data': "{'': ['*.txt', '*.rst', '*.i', '*.c', '*.h']}", 'ext_modules': '[NN_ext_module]', 'install_requires': "['numpy', 'wget', 'idx2numpy']"}), "(name='EggNetExtension', version='1.0', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', description=\n 'NN calculation library for python', url=\n 'https://github.com/marbleton/FPGA_MNIST', packages=['EggNetExtension'],\n package_data={'': ['*.txt', '*.rst', '*.i', '*.c', '*.h']}, ext_modules\n =[NN_ext_module], install_requires=['numpy', 'wget', 'idx2numpy'])\n", (3115, 3498), False, 'from setuptools import setup, find_packages\n'), ((462, 481), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (479, 481), False, 'import numpy\n'), ((1372, 1404), 'wget.download', 'wget.download', (['np_file_url', 'path'], {}), '(np_file_url, path)\n', (1385, 1404), False, 'import wget\n'), ((1455, 1498), 'os.path.exists', 'os.path.exists', (['"""./EggNetExtension/numpy.i"""'], {}), "('./EggNetExtension/numpy.i')\n", (1469, 1498), False, 'import os\n'), ((2310, 2327), 'platform.system', 'platform.system', ([], {}), '()\n', (2325, 2327), False, 'import platform\n'), ((525, 550), 'numpy.get_numpy_include', 'numpy.get_numpy_include', ([], {}), '()\n', (548, 550), False, 'import numpy\n'), ((1567, 1592), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1582, 1592), False, 'import os\n'), ((2377, 2394), 'platform.system', 'platform.system', ([], {}), '()\n', (2392, 2394), False, 'import platform\n'), ((780, 831), 're.compile', 're.compile', (['"""(?P<MAJOR>[0-9]+)\\\\.(?P<MINOR>[0-9]+)"""'], {}), "('(?P<MAJOR>[0-9]+)\\\\.(?P<MINOR>[0-9]+)')\n", (790, 831), False, 'import re\n'), ((2518, 2535), 'platform.system', 'platform.system', ([], {}), '()\n', (2533, 2535), False, 'import platform\n')] |
import discord
import subprocess
import os, random, re, requests, json
import asyncio
from datetime import datetime
from discord.ext import commands
class Economy(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print('[+] Trashmoney Code ACTIVE!')
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.command(aliases=['tm'])
async def trashmoney(self,ctx,amount:int):
await open_account(ctx.author)
user = ctx.author
users = await get_bank_data()
balancee = await update_bank(ctx.author)
time = 10
if amount > balancee[0]:
await ctx.send('You poor lmao! what money u want to TRASH! NOOB!!!')
return
if amount < 0:
await ctx.send('You are poor or wrong put amount?')
return
await update_bank(ctx.author,-1*amount, 'wallet')
await ctx.send(f"{user} Trash he money! type [!!claim] to get the money!")
msg = await ctx.send(f'Member had {time}s to claim!')
with open('trash_money.txt','w') as f:
f.write(str(amount))
f.close()
while True:
time -= 1
if time == 0:
f = open('trash_money.txt','r')
if f.read() == '0':
await ctx.send('Someone claimed the trash money!')
else:
await ctx.send('No one claimed the trash money!')
break
await msg.edit(content=f'Member had {time}s to claim!')
await asyncio.sleep(1)
async def open_account(user):
users = await get_bank_data()
with open('./bank.json','r') as f:
users = json.load(f)
if str(user.id) in users:
return False
else:
users[str(user.id)] = {}
users[str(user.id)]["wallet"] = 0
users[str(user.id)]["bank"] = 0
with open('./bank.json','w') as f:
json.dump(users,f)
return True
async def get_bank_data():
with open('./bank.json','r') as f:
users = json.load(f)
return users
async def update_bank(user,change = 0,mode = 'wallet'):
users = await get_bank_data()
users[str(user.id)][mode] += change
with open('./bank.json','w') as f:
json.dump(users,f)
balancee = [users[str(user.id)]['wallet'],users[str(user.id)]['bank']]
return balancee
def setup(bot):
bot.add_cog(Economy(bot)) | [
"discord.ext.commands.Cog.listener",
"asyncio.sleep",
"json.load",
"discord.ext.commands.cooldown",
"discord.ext.commands.command",
"json.dump"
] | [((236, 259), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (257, 259), False, 'from discord.ext import commands\n'), ((333, 383), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(60)', 'commands.BucketType.user'], {}), '(1, 60, commands.BucketType.user)\n', (350, 383), False, 'from discord.ext import commands\n'), ((387, 419), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['tm']"}), "(aliases=['tm'])\n", (403, 419), False, 'from discord.ext import commands\n'), ((1695, 1707), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1704, 1707), False, 'import os, random, re, requests, json\n'), ((1940, 1959), 'json.dump', 'json.dump', (['users', 'f'], {}), '(users, f)\n', (1949, 1959), False, 'import os, random, re, requests, json\n'), ((2063, 2075), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2072, 2075), False, 'import os, random, re, requests, json\n'), ((2278, 2297), 'json.dump', 'json.dump', (['users', 'f'], {}), '(users, f)\n', (2287, 2297), False, 'import os, random, re, requests, json\n'), ((1551, 1567), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1564, 1567), False, 'import asyncio\n')] |
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
import requests
import csv
@shared_task
def getStockQuote(room_group_name, stock_code):
url = 'https://stooq.com/q/l/?s=%s&f=sd2t2ohlcv&h&e=csv'
with requests.Session() as s:
response = s.get(url % stock_code)
if response.status_code != 200:
print('getStockQuote - Failed to get data: %s', response.status_code)
async_to_sync(channel_layer.group_send)(
room_group_name,
{
'type': 'bot_message',
'data': {
'command': 'bot_message',
'from': 'Bot stock',
'message': 'Error getting %s information' % stock_code.upper()
}
}
)
else:
data = csv.DictReader(response.text.strip().split('\n'))
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
room_group_name,
{
'type': 'bot_message',
'data': {
'command': 'bot_message',
'from': 'Bot stock',
'message': '%s quote is $%s per share' % (stock_code.upper(), next(data)['Close'])
}
}
)
return
| [
"requests.Session",
"asgiref.sync.async_to_sync",
"channels.layers.get_channel_layer"
] | [((333, 351), 'requests.Session', 'requests.Session', ([], {}), '()\n', (349, 351), False, 'import requests\n'), ((1047, 1066), 'channels.layers.get_channel_layer', 'get_channel_layer', ([], {}), '()\n', (1064, 1066), False, 'from channels.layers import get_channel_layer\n'), ((535, 574), 'asgiref.sync.async_to_sync', 'async_to_sync', (['channel_layer.group_send'], {}), '(channel_layer.group_send)\n', (548, 574), False, 'from asgiref.sync import async_to_sync\n'), ((1079, 1118), 'asgiref.sync.async_to_sync', 'async_to_sync', (['channel_layer.group_send'], {}), '(channel_layer.group_send)\n', (1092, 1118), False, 'from asgiref.sync import async_to_sync\n')] |
from jobbergate import appform
def mainflow(data):
return [appform.Const("val", default=10)]
| [
"jobbergate.appform.Const"
] | [((65, 97), 'jobbergate.appform.Const', 'appform.Const', (['"""val"""'], {'default': '(10)'}), "('val', default=10)\n", (78, 97), False, 'from jobbergate import appform\n')] |
# -*- coding: utf-8 -*-
import re
__author__ = 'luckydonald'
__all__ = [
'USERNAME_REGEX', '_USERNAME_REGEX', 'USER_AT_REGEX', '_USER_AT_REGEX',
'FULL_USERNAME_REGEX', '_FULL_USERNAME_REGEX'
]
_USERNAME_REGEX = '[a-zA-Z](?:[a-zA-Z0-9]|_(?!_)){3,30}[a-zA-Z0-9]' # https://regex101.com/r/nZdOHS/2
USERNAME_REGEX = re.compile(_USERNAME_REGEX)
_USER_AT_REGEX = '@(?P<username>' + _USERNAME_REGEX + ')'
USER_AT_REGEX = re.compile(_USER_AT_REGEX)
from .urls.telegram import _TELEGRAM_DOMAIN_REGEX
_FULL_USERNAME_REGEX = '(?P<prefix>(?P<domain>' + _TELEGRAM_DOMAIN_REGEX + ')|@)(?P<username>' + _USERNAME_REGEX + ')'
FULL_USERNAME_REGEX = re.compile(_FULL_USERNAME_REGEX)
| [
"re.compile"
] | [((323, 350), 're.compile', 're.compile', (['_USERNAME_REGEX'], {}), '(_USERNAME_REGEX)\n', (333, 350), False, 'import re\n'), ((426, 452), 're.compile', 're.compile', (['_USER_AT_REGEX'], {}), '(_USER_AT_REGEX)\n', (436, 452), False, 'import re\n'), ((645, 677), 're.compile', 're.compile', (['_FULL_USERNAME_REGEX'], {}), '(_FULL_USERNAME_REGEX)\n', (655, 677), False, 'import re\n')] |
from datetime import datetime, timedelta
import pytest
from django.test import TestCase
from tests.models import Org, Sub, Widget
data_org = {"name": "Acme Widgets"}
class FieldTestCase(TestCase):
def setUp(self):
self.org = Org.objects.create(**data_org)
self.created = datetime.now()
self.one_sec = timedelta(seconds=1)
pass
# org = Org.objects.create(**data_org)
def test_obj_creation(self):
assert self.one_sec > self.created - self.org.created
assert self.one_sec > self.created - self.org.updated
def test_updated(self):
self.org.name = "Updated"
self.org.save()
now = datetime.now()
assert self.one_sec > self.created - self.org.created
assert self.one_sec > now - self.org.updated
| [
"datetime.datetime.now",
"datetime.timedelta",
"tests.models.Org.objects.create"
] | [((242, 272), 'tests.models.Org.objects.create', 'Org.objects.create', ([], {}), '(**data_org)\n', (260, 272), False, 'from tests.models import Org, Sub, Widget\n'), ((296, 310), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (308, 310), False, 'from datetime import datetime, timedelta\n'), ((334, 354), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (343, 354), False, 'from datetime import datetime, timedelta\n'), ((679, 693), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (691, 693), False, 'from datetime import datetime, timedelta\n')] |
from flask import Flask, render_template, request, redirect, url_for, Markup, \
flash # Imports Flask and all required modules
import databasemanager # Provides the functionality to load stuff from the database
app = Flask(__name__)
import errormanager # Enum for types of errors
# DECLARE datamanager as TYPE: databasemanager
datamanager = databasemanager
# DECLARE errorman as TYPE: errormanager
errorman = errormanager
# DECLARE Current User as string
# Provides a means of the application knowing who is signed in
CurrentUser: str
# Route function for homepage.
# @return Returns render template of base.hmtl
@app.route('/')
def Home():
datamanager.LoadContent()
return render_template('base.html', entries=datamanager.entries, bFailure=False, app=datamanager)
# Checks the username and the password and handles any errors
# @route Homepage
# @method: POST
# @return redirect: Redirect to 'AdminHome' function after successful login
# @return render_template: base.html with failure condition
@app.route('/', methods=['POST'])
def Login():
if request.method == "POST":
try:
password = request.form['Password']
username = request.form['Username']
if (password != '') and (username != ''):
if datamanager.CheckUser(username, password) == True:
global CurrentUser
CurrentUser = username
globals()
return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=username))
else:
Failure = errorman.EErrorType.FailedPassword
return render_template('base.html', fail=Failure, failenum=errorman.EErrorType,
entries=datamanager.entries, bFailure=True, app=datamanager)
else:
Failure = errorman.EErrorType.FailedNone
return render_template('base.html', fail=Failure, failenum=errorman.EErrorType, bFailure=True,
entires=datamanager.entries, app=datamanager)
except:
return render_template('base.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType,
bFailure=True, entries=datamanager.entries)
# Main route for admin homepage
# Checks for encrypted string to ensure access was granted
# @route: '/adminbase' <auth: encrypted string> <user: user's username>
# @param auth: Encrypted string used for security
# @param user: Username of user
# @return render_template: adminbase.html with entries, the username and the datamanager
# @return redirect: 'Home' will return the user to home if they don't have valid acsses
@app.route('/adminbase/<auth> <user>')
def AdminHome(auth, user):
if auth == str(datamanager.Encrypt('True')):
datamanager.LoadContent()
print(datamanager.entries)
return render_template('adminbase.html', entries=datamanager.entries, user=user, app=datamanager)
else:
return redirect(url_for('Home'))
# Gets the users inputted values for a new entry and adds them to the website
# @route: '/adminbase.html' <user: username of signed in user>
# @param user: username of the signed in user
# @return redirect: 'Admin Home' function with encryption string and username
@app.route('/adminbase.html/<user>', methods=["POST"])
def CreateNew(user: str):
if request.method == "POST":
# try:
title = request.form['Title']
desc = request.form['Desc']
image = request.form['Image']
caption = request.form['Caption']
id = len(datamanager.entries)
ind = str(id)
datamanager.AddNewItem(title, desc, caption, image, id, ind, 0)
return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=user))
# except:
# return render_template('error.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType)
# Deprecated
#@<EMAIL>('/adminbase', methods=["POST"])
#def Delete():
#if request.method == "POST":
# delete = request.form['Del']
# if delete == True:
# datamanager.RemoveItem(0)
# return render_template(url_for('AdminHome', auth=str(datamanager.Encrypt('True'))))
#else:
# return render_template(url_for('AdminHome', auth=str(datamanager.Encrypt('True'))))
# Main route for signup page
# @route: '/signup'
# @return render_template: signup.html
@app.route('/signup')
def SignUp():
return render_template('signup.html')
# Gets the entry input values and adds to database also handles errors
# @route '/sign' methods: GET and POST
# @return redirect: 'Home'
# @return render_template: 'error.html' with error type
@app.route('/sign', methods=["POST", "GET"])
def AddNewUser():
try:
if request.method == "POST":
AdminKey = request.form['Key']
Password = request.form['Password']
Username = request.form['Username']
ConfirmPass = request.form['<PASSWORD>']
if datamanager.CheckKey(AdminKey) == True:
if ((Password != '') and (Username != '') and (ConfirmPass != '')):
if ConfirmPass == Password:
if datamanager.NewUser(Username, Password) == True:
return redirect(url_for('Home'))
else:
return render_template('error.html', fail=errorman.EErrorType.FailedPassword,
failenum=errorman.EErrorType)
else:
return render_template('error.html', fail=errorman.EErrorType.FailedNone,
failenum=errorman.EErrorType)
return render_template('error.html')
except:
return render_template('error.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType)
# Deprecated
@app.route('/likes/<id>')
def Like(id: int):
datamanager.AddLike(id)
return redirect(url_for('Home'))
# Deprecated
@app.route('/deleteconfirm', methods=['GET'])
def ChangeDeleteTarget():
id = request.form['Delete']
global deletetarget
deletetarget = id
print(deletetarget)
globals()
return 'hi' # This exists because Flask is bad
# Deprecated
@app.route('/delete')
def Delete():
datamanager.RemoveItem(datamanager.deletetarget)
global CurrentUser
CurrentUser = 'user'
return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=CurrentUser, app=datamanager))
# Main Flask Loop
if __name__ == '__main__':
app.secret_key = datamanager.Encrypt('key')
app.run()
| [
"flask.render_template",
"flask.url_for",
"flask.Flask"
] | [((228, 243), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((718, 812), 'flask.render_template', 'render_template', (['"""base.html"""'], {'entries': 'datamanager.entries', 'bFailure': '(False)', 'app': 'datamanager'}), "('base.html', entries=datamanager.entries, bFailure=False,\n app=datamanager)\n", (733, 812), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((4654, 4684), 'flask.render_template', 'render_template', (['"""signup.html"""'], {}), "('signup.html')\n", (4669, 4684), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((3017, 3111), 'flask.render_template', 'render_template', (['"""adminbase.html"""'], {'entries': 'datamanager.entries', 'user': 'user', 'app': 'datamanager'}), "('adminbase.html', entries=datamanager.entries, user=user,\n app=datamanager)\n", (3032, 3111), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((5938, 5967), 'flask.render_template', 'render_template', (['"""error.html"""'], {}), "('error.html')\n", (5953, 5967), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((6207, 6222), 'flask.url_for', 'url_for', (['"""Home"""'], {}), "('Home')\n", (6214, 6222), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((3144, 3159), 'flask.url_for', 'url_for', (['"""Home"""'], {}), "('Home')\n", (3151, 3159), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((5997, 6098), 'flask.render_template', 'render_template', (['"""error.html"""'], {'fail': 'errorman.EErrorType.FailedNone', 'failenum': 'errorman.EErrorType'}), "('error.html', fail=errorman.EErrorType.FailedNone, failenum\n =errorman.EErrorType)\n", (6012, 6098), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((1994, 2131), 'flask.render_template', 'render_template', (['"""base.html"""'], {'fail': 'Failure', 'failenum': 'errorman.EErrorType', 'bFailure': '(True)', 'entires': 'datamanager.entries', 'app': 'datamanager'}), "('base.html', fail=Failure, failenum=errorman.EErrorType,\n bFailure=True, entires=datamanager.entries, app=datamanager)\n", (2009, 2131), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((2205, 2349), 'flask.render_template', 'render_template', (['"""base.html"""'], {'fail': 'errorman.EErrorType.FailedNone', 'failenum': 'errorman.EErrorType', 'bFailure': '(True)', 'entries': 'datamanager.entries'}), "('base.html', fail=errorman.EErrorType.FailedNone, failenum=\n errorman.EErrorType, bFailure=True, entries=datamanager.entries)\n", (2220, 2349), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((1715, 1852), 'flask.render_template', 'render_template', (['"""base.html"""'], {'fail': 'Failure', 'failenum': 'errorman.EErrorType', 'entries': 'datamanager.entries', 'bFailure': '(True)', 'app': 'datamanager'}), "('base.html', fail=Failure, failenum=errorman.EErrorType,\n entries=datamanager.entries, bFailure=True, app=datamanager)\n", (1730, 1852), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((5781, 5882), 'flask.render_template', 'render_template', (['"""error.html"""'], {'fail': 'errorman.EErrorType.FailedNone', 'failenum': 'errorman.EErrorType'}), "('error.html', fail=errorman.EErrorType.FailedNone, failenum\n =errorman.EErrorType)\n", (5796, 5882), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((5581, 5685), 'flask.render_template', 'render_template', (['"""error.html"""'], {'fail': 'errorman.EErrorType.FailedPassword', 'failenum': 'errorman.EErrorType'}), "('error.html', fail=errorman.EErrorType.FailedPassword,\n failenum=errorman.EErrorType)\n", (5596, 5685), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n'), ((5505, 5520), 'flask.url_for', 'url_for', (['"""Home"""'], {}), "('Home')\n", (5512, 5520), False, 'from flask import Flask, render_template, request, redirect, url_for, Markup, flash\n')] |
import scipy.sparse as ssp
import scipy.sparse.csgraph as csgraph
import networkx as nx
import pylab as pl
import pygraphviz as pgv
from itertools import product, chain
class DiGraph(ssp.lil_matrix):
"""
An implementation of a directed graph with a Sparse Matrix representation using Scipy's sparse module.
Specifically the lil_matrix representation is used since it allows for efficient modification of the
sparse structure, which is useful for sampling. Most methods are aliases for operations we can perform directly
with the ones we inherit from the Scipy sparse matrix class.
Parameters
----------
arg1: object
This can be instantiated in several ways:
DiGraph(D)
with a dense matrix or rank-2 ndarray D
DiGraph(S)
with another sparse matrix S (equivalent to S.tolil())
DiGraph((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
shape: 2-tuple
The size of the underlying dimensions
dtype: type
The type of the data. Supported are bool for adjacency representations, and float for weighted edges
copy: bool
In case arg1 is a sparse matrix, whether to copy its contents when constructing a new instance
names: list of strings
A list of true names for the nodes of the graph
Attributes
----------
names: list
The list of names of the nodes if any. Useful if using non numerical identifiers for the nodes
"""
def __init__(self, arg1, shape=None, dtype=bool, copy=False, names=None):
if dtype is None:
dtype = bool
elif dtype not in [bool, float]:
raise ValueError('Either adjacency or weighted graph')
super().__init__(arg1, shape, dtype, copy)
self._names = names
@property
def n_nodes(self):
return self.shape[0]
@property
def n_edges(self):
return self.count_nonzero()
@property
def names(self):
if self._names is None:
return list(range(self.n_nodes))
return self._names
def nodes(self, as_names=False):
if as_names and self._names is not None:
return self._names
return list(range(self.shape[0]))
def nodes_iter(self, as_names=False):
if as_names and self._names is not None:
return iter(self._names)
return range(self.shape[0])
def edges(self):
return list(zip(*self.nonzero()))
def edges_iter(self):
return zip(*self.nonzero())
def add_edge(self, u, v, value=None):
if not self.is_valid_edge(u, v):
raise ValueError('Edge {0}-->{1} cannot be added'.format(u, v))
if value is None:
value = 1
self[u, v] = value
def add_edges(self, edges, value=None):
if any(map(lambda e: not self.is_valid_edge(*e), edges)):
raise ValueError('At least one edge cannot be added')
if value is None:
value = 1
us, vs = list(zip(*edges))
self[us, vs] = value
def remove_edge(self, u, v):
self[u, v] = 0
def remove_edges(self, edges):
us, vs = zip(*edges)
self[us, vs] = 0
def parents(self, node):
return self.T[node].nonzero()[1]
def children(self, node):
return self[node].nonzero()[1]
def descendants(self, node, sort=False):
descendants = csgraph.breadth_first_order(self, i_start=node, directed=True, return_predecessors=False)[1:]
if not sort:
return descendants
return sorted(descendants)
def ancestors(self, node, sort=False):
ancestors = csgraph.breadth_first_order(self.T, i_start=node, directed=True, return_predecessors=False)[1:]
if not sort:
return ancestors
return sorted(ancestors)
def has_path(self, u, v):
return u in self.ancestors(v)
def is_valid_edge(self, u, v):
return u != v
def copy(self):
arg1 = ssp.lil_matrix.copy(self)
a = DiGraph(arg1=arg1, names=self._names)
return a
def to_nx_digraph(self):
return nx.from_scipy_sparse_matrix(self, create_using=nx.DiGraph())
class MBCGraph(DiGraph):
def __init__(self, arg1, n_features, shape=None, dtype=None, copy=False, names=None):
super().__init__(arg1, shape, dtype, copy, names)
self.n_features = n_features
@property
def n_targets(self):
return self.shape[0] - self.n_features
def is_valid_edge(self, u, v):
# if u >= n_features it's a target i.e. it can have edges to any variables
# if not, then it's a feature and v must also be a feature i.e. < n_features
return u != v and (u >= self.n_features or v < self.n_features)
def copy(self):
arg1 = ssp.lil_matrix.copy(self)
a = MBCGraph(arg1=arg1, n_features=self.n_features, names=self._names)
return a
# Helper functions
def possible_edges_iter(targets, feature):
edges = chain(product(targets, targets), product(targets, feature), product(feature, feature))
edges = filter(lambda e: e[0] != e[1], edges)
return edges
def possible_edges(targets, features):
return list(possible_edges_iter(targets, features))
def topsort(G: ssp.spmatrix, nodes=None, reverse=False):
order = []
seen = set()
explored = set()
if nodes is None:
nodes = range(G.shape[0])
for v in nodes: # process all vertices in G
if v in explored:
continue
fringe = [v] # nodes yet to look at
while fringe:
w = fringe[-1] # depth first search
if w in explored: # already looked down this branch
fringe.pop()
continue
seen.add(w) # mark as seen
# Check successors for cycles and for new nodes
new_nodes = []
for n in G[w].nonzero()[1]:
if n not in explored:
if n in seen: # CYCLE !!
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
new_nodes.append(n)
if new_nodes: # Add new_nodes to fringe
fringe.extend(new_nodes)
else: # No new nodes so w is fully explored
explored.add(w)
order.append(w)
fringe.pop() # done considering this node
if reverse:
return order
else:
return list(reversed(order))
def plot_digraph(graph: DiGraph):
nx.draw_networkx(nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph()))
pl.show()
def load_graph(path):
dot_graph = pgv.AGraph(filename=path)
if 'names' in dot_graph.graph_attr:
names = dot_graph.graph_attr['names']
else:
names = None
dtype = dot_graph.graph_attr['data_type']
if dtype == 'bool':
dtype = bool
elif dtype == 'float64':
dtype = float
else:
raise ValueError('Unrecognized data type')
n_nodes = dot_graph.number_of_nodes()
graph = DiGraph((n_nodes, n_nodes), dtype=dtype, names=names)
if dtype == bool:
u, v = zip(dot_graph.edges_iter())
u = list(map(int, u))
v = list(map(int, v))
graph[u, v] = True
else:
for u, v in dot_graph.edges():
weight = dot_graph.get_edge(u, v).attr['weight']
graph[int(u), int(v)] = weight
return graph
def save_graph(graph: DiGraph, path):
if path[-3:] != '.gv' and path[-4:] != '.dot':
path += '.gv'
if graph._names is None:
dot_graph = pgv.AGraph(data_type=str(graph.dtype))
else:
dot_graph = pgv.AGraph(data_type=str(graph.dtype), names=graph._names)
dot_graph.add_nodes_from(graph.nodes())
if graph.dtype == bool:
dot_graph.add_edges_from(graph.edges())
else:
for u, v in graph.edges_iter():
dot_graph.add_edge(u, v, weight=graph[u, v])
dot_graph.write(path)
| [
"networkx.NetworkXUnfeasible",
"scipy.sparse.csgraph.breadth_first_order",
"pylab.show",
"itertools.product",
"networkx.DiGraph",
"scipy.sparse.lil_matrix.copy",
"pygraphviz.AGraph"
] | [((6726, 6735), 'pylab.show', 'pl.show', ([], {}), '()\n', (6733, 6735), True, 'import pylab as pl\n'), ((6776, 6801), 'pygraphviz.AGraph', 'pgv.AGraph', ([], {'filename': 'path'}), '(filename=path)\n', (6786, 6801), True, 'import pygraphviz as pgv\n'), ((4114, 4139), 'scipy.sparse.lil_matrix.copy', 'ssp.lil_matrix.copy', (['self'], {}), '(self)\n', (4133, 4139), True, 'import scipy.sparse as ssp\n'), ((4924, 4949), 'scipy.sparse.lil_matrix.copy', 'ssp.lil_matrix.copy', (['self'], {}), '(self)\n', (4943, 4949), True, 'import scipy.sparse as ssp\n'), ((5128, 5153), 'itertools.product', 'product', (['targets', 'targets'], {}), '(targets, targets)\n', (5135, 5153), False, 'from itertools import product, chain\n'), ((5155, 5180), 'itertools.product', 'product', (['targets', 'feature'], {}), '(targets, feature)\n', (5162, 5180), False, 'from itertools import product, chain\n'), ((5182, 5207), 'itertools.product', 'product', (['feature', 'feature'], {}), '(feature, feature)\n', (5189, 5207), False, 'from itertools import product, chain\n'), ((3525, 3618), 'scipy.sparse.csgraph.breadth_first_order', 'csgraph.breadth_first_order', (['self'], {'i_start': 'node', 'directed': '(True)', 'return_predecessors': '(False)'}), '(self, i_start=node, directed=True,\n return_predecessors=False)\n', (3552, 3618), True, 'import scipy.sparse.csgraph as csgraph\n'), ((3771, 3866), 'scipy.sparse.csgraph.breadth_first_order', 'csgraph.breadth_first_order', (['self.T'], {'i_start': 'node', 'directed': '(True)', 'return_predecessors': '(False)'}), '(self.T, i_start=node, directed=True,\n return_predecessors=False)\n', (3798, 3866), True, 'import scipy.sparse.csgraph as csgraph\n'), ((4299, 4311), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4309, 4311), True, 'import networkx as nx\n'), ((6707, 6719), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (6717, 6719), True, 'import networkx as nx\n'), ((6155, 6203), 'networkx.NetworkXUnfeasible', 'nx.NetworkXUnfeasible', (['"""Graph contains a cycle."""'], {}), "('Graph contains a cycle.')\n", (6176, 6203), True, 'import networkx as nx\n')] |
##
## Software PI-Net: Pose Interacting Network for Multi-Person Monocular 3D Pose Estimation
## Copyright Inria and UPC
## Year 2021
## Contact : <EMAIL>
##
## The software PI-Net is provided under MIT License.
##
#used in train for skeleton input
import os
import os.path as osp
import numpy as np
import math
from utils.pose_utils import get_bbox
from pycocotools.coco import COCO
from config import cfg
import json
from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel
from utils.vis import vis_keypoints, vis_3d_skeleton
import cv2 as cv
def larger_bbox(bbox):
w = bbox[2]
h = bbox[3]
c_x = bbox[0] + w/2.
c_y = bbox[1] + h/2.
aspect_ratio = cfg.input_shape[1]/cfg.input_shape[0]
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
bbox[2] = w*1.25
bbox[3] = h*1.25
bbox[0] = c_x - bbox[2]/2.
bbox[1] = c_y - bbox[3]/2.
return bbox
class MuCo:
def __init__(self, data_split, is_val):
self.data_split = data_split
self.img_dir = osp.join(cfg.data_dir, 'MuCo', 'data')
self.train_annot_path = cfg.train_annot_path
self.val_annot_path = cfg.val_annot_path
self.joint_num = 21
self.joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
self.flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) )
self.skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )
self.joints_have_depth = True
self.root_idx = self.joints_name.index('Pelvis')
self.is_val = is_val
self.pair_index_path = cfg.pair_index_path_muco
self.data = self.load_data()
def load_data(self):
if self.data_split == 'train':
db = COCO(self.train_annot_path)
data = []
id2pairId = json.load(open(self.pair_index_path,'r'))
n = 0
for aid in db.anns.keys():
ann = db.anns[aid]
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy = img['f']
cx, cy = img['c']
f = np.array([fx, fy]); c = np.array([cx, cy]);
joint_cam = np.array(ann['keypoints_cam'])
joint_cam_posenet = np.array(ann['keypoints_cam_posenet'])
root_cam = joint_cam[self.root_idx]
joint_img = np.array(ann['keypoints_img'])
joint_img = np.concatenate([joint_img, joint_cam[:,2:]],1)
joint_img[:,2] = joint_img[:,2] - root_cam[2]
joint_vis = np.ones((self.joint_num,1))
bbox_id = ann['id']
orig_bbox = ann['bbox']
bbox = np.array(ann['bbox'])
img_width, img_height = img['width'], img['height']
x, y, w, h = bbox
center = [x+w/2, y+h/2]
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((img_width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((img_height - 1, y1 + np.max((0, h - 1))))
if w*h > 0 and x2 >= x1 and y2 >= y1:
bbox = np.array([x1, y1, x2-x1, y2-y1])
else:
print("sanitize bboxes:",image_id)
continue
bbox = larger_bbox(bbox)
n_copain = id2pairId[str(bbox_id)] - bbox_id + n # n_copain - n = id_copain - id
id_list = db.getAnnIds(image_id) # ids of instances in same img
dis2id = {}
n_list = []
for cand_id in id_list:
bbox_cand = db.loadAnns(cand_id)[0]['bbox']
center_cand = [bbox_cand[0] + bbox_cand[2]/2, bbox_cand[1] + bbox_cand[3]/2]
dis = math.sqrt((center[0] - center_cand[0])**2 + (center[1] - center_cand[1])**2)
dis2id[dis] = cand_id
id_list_sorted = [dis2id[k] for k in sorted(dis2id.keys())]
for cand_id in id_list_sorted:
n_list.append(cand_id - bbox_id + n)
data.append({
'img_id': image_id,
'img_path': img_path,
'id': bbox_id,
'n_copain': n_copain,
'n_list': n_list,
'orig_bbox': orig_bbox,
'bbox': bbox,
'joint_img': joint_img, # [org_img_x, org_img_y, depth - root_depth]
'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate
'joint_vis': joint_vis,
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
'joint_cam_posenet': joint_cam_posenet, # result from posenet_nonefine
#'noise': noise,
})
n = n + 1
return data
def evaluate(self, preds, result_dir):
# test for img output, use in test.py
# add posenet 3d cam result to gt file as 'MuPoTS-3D_with_posenet_result.json', add key 'keypoints_cam_posenet'
gts = self.load_data()#self.data
sample_num = len(preds)
joint_num = self.joint_num
pred_2d_per_bbox = {}
pred_2d_save = {}
pred_3d_save = {}
gt_dict_orig = json.load(open('data/MuCo/data/annotations/MuCo-3DHP.json','r'))
gt_dict = gt_dict_orig
for n in range(sample_num):
gt = gts[n]
bbox = gt['bbox']
gt_3d_root = gt['root_cam']
bbox_id = gt['id']
f = gt['f']
c = gt['c']
pred_2d_kpt = preds[n].copy()
pred_2d_kpt = warp_coord_to_original(pred_2d_kpt, bbox, gt_3d_root)
if str(n) in pred_2d_per_bbox:
pred_2d_per_bbox[str(n)].append(pred_2d_kpt)
else:
pred_2d_per_bbox[str(n)] = [pred_2d_kpt]
pred_2d_kpt = pred_2d_per_bbox[str(n)].copy()
pred_2d_kpt = np.mean(np.array(pred_2d_kpt), axis=0)
pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)
### add posenet 3d cam result to gt file as 'MuCo_with_posenet_result.json', add key 'keypoints_cam_posenet'
gt_dict['annotations'][int(bbox_id)]['keypoints_cam_posenet'] = pred_3d_kpt.tolist()
with open('data/MuCo/MuCo_with_posenet_result.json','w') as w:
json.dump(gt_dict, w)
| [
"numpy.ones",
"utils.pose_utils.warp_coord_to_original",
"json.dump",
"os.path.join",
"pycocotools.coco.COCO",
"math.sqrt",
"numpy.max",
"numpy.array",
"numpy.concatenate",
"utils.pose_utils.pixel2cam"
] | [((1105, 1143), 'os.path.join', 'osp.join', (['cfg.data_dir', '"""MuCo"""', '"""data"""'], {}), "(cfg.data_dir, 'MuCo', 'data')\n", (1113, 1143), True, 'import os.path as osp\n'), ((2129, 2156), 'pycocotools.coco.COCO', 'COCO', (['self.train_annot_path'], {}), '(self.train_annot_path)\n', (2133, 2156), False, 'from pycocotools.coco import COCO\n'), ((2425, 2465), 'os.path.join', 'osp.join', (['self.img_dir', "img['file_name']"], {}), "(self.img_dir, img['file_name'])\n", (2433, 2465), True, 'import os.path as osp\n'), ((2542, 2560), 'numpy.array', 'np.array', (['[fx, fy]'], {}), '([fx, fy])\n', (2550, 2560), True, 'import numpy as np\n'), ((2566, 2584), 'numpy.array', 'np.array', (['[cx, cy]'], {}), '([cx, cy])\n', (2574, 2584), True, 'import numpy as np\n'), ((2611, 2641), 'numpy.array', 'np.array', (["ann['keypoints_cam']"], {}), "(ann['keypoints_cam'])\n", (2619, 2641), True, 'import numpy as np\n'), ((2674, 2712), 'numpy.array', 'np.array', (["ann['keypoints_cam_posenet']"], {}), "(ann['keypoints_cam_posenet'])\n", (2682, 2712), True, 'import numpy as np\n'), ((2786, 2816), 'numpy.array', 'np.array', (["ann['keypoints_img']"], {}), "(ann['keypoints_img'])\n", (2794, 2816), True, 'import numpy as np\n'), ((2841, 2889), 'numpy.concatenate', 'np.concatenate', (['[joint_img, joint_cam[:, 2:]]', '(1)'], {}), '([joint_img, joint_cam[:, 2:]], 1)\n', (2855, 2889), True, 'import numpy as np\n'), ((2970, 2998), 'numpy.ones', 'np.ones', (['(self.joint_num, 1)'], {}), '((self.joint_num, 1))\n', (2977, 2998), True, 'import numpy as np\n'), ((3086, 3107), 'numpy.array', 'np.array', (["ann['bbox']"], {}), "(ann['bbox'])\n", (3094, 3107), True, 'import numpy as np\n'), ((3256, 3270), 'numpy.max', 'np.max', (['(0, x)'], {}), '((0, x))\n', (3262, 3270), True, 'import numpy as np\n'), ((3288, 3302), 'numpy.max', 'np.max', (['(0, y)'], {}), '((0, y))\n', (3294, 3302), True, 'import numpy as np\n'), ((5945, 5998), 'utils.pose_utils.warp_coord_to_original', 'warp_coord_to_original', (['pred_2d_kpt', 'bbox', 'gt_3d_root'], {}), '(pred_2d_kpt, bbox, gt_3d_root)\n', (5967, 5998), False, 'from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel\n'), ((6329, 6357), 'utils.pose_utils.pixel2cam', 'pixel2cam', (['pred_2d_kpt', 'f', 'c'], {}), '(pred_2d_kpt, f, c)\n', (6338, 6357), False, 'from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel\n'), ((6653, 6674), 'json.dump', 'json.dump', (['gt_dict', 'w'], {}), '(gt_dict, w)\n', (6662, 6674), False, 'import json\n'), ((3509, 3545), 'numpy.array', 'np.array', (['[x1, y1, x2 - x1, y2 - y1]'], {}), '([x1, y1, x2 - x1, y2 - y1])\n', (3517, 3545), True, 'import numpy as np\n'), ((4104, 4189), 'math.sqrt', 'math.sqrt', (['((center[0] - center_cand[0]) ** 2 + (center[1] - center_cand[1]) ** 2)'], {}), '((center[0] - center_cand[0]) ** 2 + (center[1] - center_cand[1]) ** 2\n )\n', (4113, 4189), False, 'import math\n'), ((6272, 6293), 'numpy.array', 'np.array', (['pred_2d_kpt'], {}), '(pred_2d_kpt)\n', (6280, 6293), True, 'import numpy as np\n'), ((3348, 3366), 'numpy.max', 'np.max', (['(0, w - 1)'], {}), '((0, w - 1))\n', (3354, 3366), True, 'import numpy as np\n'), ((3415, 3433), 'numpy.max', 'np.max', (['(0, h - 1)'], {}), '((0, h - 1))\n', (3421, 3433), True, 'import numpy as np\n')] |
import discord
import slash_util
class SampleCog(slash_util.Cog):
@slash_util.slash_command(guild_id=123)
async def pog(self, ctx: slash_util.Context):
await ctx.send("pog", ephemeral=True)
@slash_util.message_command(guild_id=123)
async def quote(self, ctx: slash_util.Context, message: discord.Message): # the `message` parameter is REQURIED for message commands
await ctx.send(f"> {message.clean_content}\n- {message.author}")
@slash_util.user_command(guild_id=123)
async def bonk(self, ctx: slash_util.Context, user: discord.Member):
await ctx.send(f"{ctx.author} bonks {user} :hammer:")
def setup(bot):
bot.add_cog(SampleCog(bot))
| [
"slash_util.slash_command",
"slash_util.user_command",
"slash_util.message_command"
] | [((72, 110), 'slash_util.slash_command', 'slash_util.slash_command', ([], {'guild_id': '(123)'}), '(guild_id=123)\n', (96, 110), False, 'import slash_util\n'), ((213, 253), 'slash_util.message_command', 'slash_util.message_command', ([], {'guild_id': '(123)'}), '(guild_id=123)\n', (239, 253), False, 'import slash_util\n'), ((471, 508), 'slash_util.user_command', 'slash_util.user_command', ([], {'guild_id': '(123)'}), '(guild_id=123)\n', (494, 508), False, 'import slash_util\n')] |
# encoding=utf-8
# Elkatip
import os
import imp
# main class
class Elkatip():
api = None
gui = None
def __init__(self):
self.modulePath = os.path.dirname(__file__)
pass
def toExt(self, text):
if not self.api:
api = imp.load_source("api", self.modulePath + "/api.py")
self.api = api.Api()
return self.api.toExt(text)
def toBase(self, text):
if not self.api:
api = imp.load_source("api", self.modulePath + "/api.py")
self.api = api.Api()
return self.api.toBase(text)
def showGui(self):
if not self.gui:
gui = imp.load_source("gui", self.modulePath + "/gui.py")
self.gui = gui.Gui()
self.gui.showGui()
if __name__ == "__main__":
ktp = Elkatip()
ktp.showGui()
# uighurche = "ئالىمجان" # base
# print(uighurche)
# uyghurqa = ktp.toExt(uighurche) # ext
# uighurche = ktp.toBase(uyghurqa) # base
# print(uyghurqa)
# print(uighurche)
| [
"os.path.dirname",
"imp.load_source"
] | [((162, 187), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (177, 187), False, 'import os\n'), ((272, 323), 'imp.load_source', 'imp.load_source', (['"""api"""', "(self.modulePath + '/api.py')"], {}), "('api', self.modulePath + '/api.py')\n", (287, 323), False, 'import imp\n'), ((485, 536), 'imp.load_source', 'imp.load_source', (['"""api"""', "(self.modulePath + '/api.py')"], {}), "('api', self.modulePath + '/api.py')\n", (500, 536), False, 'import imp\n'), ((674, 725), 'imp.load_source', 'imp.load_source', (['"""gui"""', "(self.modulePath + '/gui.py')"], {}), "('gui', self.modulePath + '/gui.py')\n", (689, 725), False, 'import imp\n')] |
from __future__ import absolute_import
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import KernelDensity
from sklearn.linear_model import LinearRegression, LogisticRegression
import pickle
import os
import matplotlib.pylab as plt
from sklearn.externals import joblib
import numpy as np
from sklearn.model_selection import GridSearchCV
import seaborn as sbn
import logging
from .absmodel import Module
logger_format = "%(levelname)s [%(asctime)s]: %(message)s"
logging.basicConfig(filename="logfile.log",
level=logging.DEBUG, format=logger_format,
filemode='w') # use filemode='a' for APPEND
logger = logging.getLogger(__name__)
def grid_fit_kde(residual):
"""
Grid search for best bandwidth of KDE
Args:
residual: residual value.
Returns:
"""
grid = GridSearchCV(KernelDensity(), {'bandwidth':np.linspace(0.1,1.0,20)}, cv=20)
grid.fit(residual)
return grid.best_params_
class MixLinearModel(Module):
"""
Mixture of linear src.
Train logistic regression for 0/1 prediction. And fit weighted linear regression,
with weight from output of the logistic regression.
Fit mixture of linear-src for rainy and non-rainy events.
"""
def __init__(self, linear_reg=LinearRegression(), log_reg=LogisticRegression(),
kde=KernelDensity(kernel="gaussian"), eps=0.0001, offset = -.05):
super(MixLinearModel, self).__init__()
self.linear_reg = linear_reg
self.eps = eps
self.log_reg = log_reg
self.kde = kde
self.fitted = False
self.residual = False
self.offset= offset
@staticmethod
def residual_plot(observed, true_value, fitted):
plt.scatter(true_value, np.log(observed))
plt.plot(true_value, fitted, '-r')
plt.xlabel('Log (predictor + eps)')
plt.ylabel('Log (response + eps)')
plt.show()
@staticmethod
def residual_density_plot(residual):
plt.subplot(211)
sbn.distplot(residual,hist=True )
plt.subplot(212)
sbn.kdeplot(residual)
@staticmethod
def grid_fit_kde(residual):
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(KernelDensity(), {'bandwidth':np.linspace(0.1,1.0,20)}, cv=20)
grid.fit(residual)
return grid.best_params_
def _fit(self, x, y, verbose=False, load=False):
"""
Args:
y: Nx1 ndarray observed value.
x: NxD ndarry features.
Returns:
"""
x, y = Module.validate(x, y)
l_x, l_y = np.log(x + self.eps), np.log(y + self.eps)
y_zero_one = (y > 0.0).astype(int)
if y_zero_one.max() == y_zero_one.min():
raise NotFittedError("Logistic model couldn't fit, because the number of classes is <2")
self.log_reg.fit(x, y_zero_one)
sample_weight = self.log_reg.predict_proba(x)[:, 1]
# Linear regression under log mode.
self.linear_reg.fit(X=l_x, y=l_y, sample_weight=sample_weight)
self.fitted = self.linear_reg.predict(l_x)
self.residual = (self.fitted - l_y)
# Grid fit for bandwidth.
if load is False:
param = grid_fit_kde(self.residual)
self.kde = KernelDensity(bandwidth=param["bandwidth"])
self.kde.fit(self.residual)
else:
self.kde = pickle.load(open("all_kde.kd","rb"))
self.fitted = True
#logger.debug("KDE bandwidth %s"%self.kde.bandwidth)
return self
def predict(self, x, y, label=None):
"""
Predict log-likelihood of given observation under the trained src.
Args:
y: ndarray Ground truth observation.
x: ndarray matrix Features.
label: None,
Returns:
"""
x , y = Module.validate(x, y)
if self.fitted is False:
raise NotFittedError("Call fit before prediction")
log_pred = self.log_reg.predict_proba(x)[:, 1]
linear_pred = self.linear_reg.predict(np.log(x + self.eps))
return self.mixl(y, log_pred, linear_pred)
def decision_function(self, score):
"""
Return decision based on the anomaly score.
Args:
x:
y:
label:
Returns:
"""
return score - self.offset
def mixl(self, y, logreg_prediction, linear_predictions):
"""
- if RAIN = 0, $ -log (1-p_1)$
- if RAIN > 0, $ -log [p_1 \frac{P(log(RAIN + \epsilon)}{(RAIN + \epsilon)}]$
Args:
y: (np.array) observations.
logreg_prediction:(np.array) fitted values from logistic regression (0/1 src).
linear_predictions:(np.array) fitted values from linear regression on log scale.
"""
# Reshape the data
p = logreg_prediction.reshape([-1, 1])
observations = y.reshape([-1, 1])
predictions = linear_predictions.reshape([-1, 1])
zero_rain = np.multiply((1 - p), (observations == 0))
# density of residual and convert to non-log value.
residual = predictions - np.log(observations + self.eps)
residual_density = np.exp(self.kde.score_samples(residual)).reshape(-1,1)
non_zero_rain = np.divide(np.multiply(p, residual_density),
(observations + self.eps))
result = zero_rain + non_zero_rain
return -np.log(result + np.max(result))
def to_json(self):
if not self.fitted:
raise NotFittedError("Fit method should be called before save operation.")
model_config = {
"kde_model": self.kde,
"logistic_model": self.log_reg,
"linear_model": self.linear_reg
}
return model_config
@classmethod
def from_json(cls, model_config):
mlm = MixLinearModel(linear_reg=model_config['linear_model'], log_reg=model_config['logistic_model'],
kde=model_config['kde_model'])
mlm.fitted = True
return mlm
def save(self, model_id="001", model_path="rainqc_model"):
"""
save the reg src.
Returns:
"""
# model_config = {"model_id":model_id,
# "kde":self.kde,
# "logistic_reg":self.log_reg,
# "linear_regression":self.linear_reg}
# localdatasource.dump(model_config,open(model_id+".localdatasource","wb"))
current_model = os.path.join(model_path, model_id)
if not os.path.exists(current_model):
os.makedirs(current_model)
joblib.dump(self.kde, os.path.join(current_model, "kde_model.pk"))
joblib.dump(self.linear_reg, os.path.join(current_model, "linear_model.pk"))
joblib.dump(self.log_reg, os.path.join(current_model, "logistic_model.pk"))
@classmethod
def load(cls, model_id="001", model_path="rainqc_model"):
loaded_model = os.path.join(model_path, model_id)
# model_config = localdatasource.load(open(model_id+".localdatasource","rb"))
if not os.path.exists(loaded_model):
return ValueError("Directory for saved models don't exist")
reg_model = joblib.load(os.path.join(loaded_model, "linear_model.pk"))
kde = joblib.load(os.path.join(loaded_model, "kde_model.pk"))
log_reg = joblib.load(os.path.join(loaded_model, "logistic_model.pk")) # pickle.load(model_config['zerone'])
mxll = MixLinearModel(linear_reg=reg_model, log_reg=log_reg, kde=kde)
mxll.fitted = True
return mxll
| [
"logging.getLogger",
"sklearn.exceptions.NotFittedError",
"numpy.log",
"matplotlib.pylab.show",
"os.path.exists",
"numpy.multiply",
"seaborn.distplot",
"sklearn.neighbors.KernelDensity",
"numpy.max",
"numpy.linspace",
"matplotlib.pylab.plot",
"matplotlib.pylab.xlabel",
"sklearn.linear_model.... | [((486, 591), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""logfile.log"""', 'level': 'logging.DEBUG', 'format': 'logger_format', 'filemode': '"""w"""'}), "(filename='logfile.log', level=logging.DEBUG, format=\n logger_format, filemode='w')\n", (505, 591), False, 'import logging\n'), ((667, 694), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (684, 694), False, 'import logging\n'), ((866, 881), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (879, 881), False, 'from sklearn.neighbors import KernelDensity\n'), ((1316, 1334), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1332, 1334), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1344, 1364), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1362, 1364), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1387, 1419), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""'}), "(kernel='gaussian')\n", (1400, 1419), False, 'from sklearn.neighbors import KernelDensity\n'), ((1827, 1861), 'matplotlib.pylab.plot', 'plt.plot', (['true_value', 'fitted', '"""-r"""'], {}), "(true_value, fitted, '-r')\n", (1835, 1861), True, 'import matplotlib.pylab as plt\n'), ((1870, 1905), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Log (predictor + eps)"""'], {}), "('Log (predictor + eps)')\n", (1880, 1905), True, 'import matplotlib.pylab as plt\n'), ((1914, 1948), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Log (response + eps)"""'], {}), "('Log (response + eps)')\n", (1924, 1948), True, 'import matplotlib.pylab as plt\n'), ((1957, 1967), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1965, 1967), True, 'import matplotlib.pylab as plt\n'), ((2036, 2052), 'matplotlib.pylab.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2047, 2052), True, 'import matplotlib.pylab as plt\n'), ((2061, 2094), 'seaborn.distplot', 'sbn.distplot', (['residual'], {'hist': '(True)'}), '(residual, hist=True)\n', (2073, 2094), True, 'import seaborn as sbn\n'), ((2103, 2119), 'matplotlib.pylab.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2114, 2119), True, 'import matplotlib.pylab as plt\n'), ((2128, 2149), 'seaborn.kdeplot', 'sbn.kdeplot', (['residual'], {}), '(residual)\n', (2139, 2149), True, 'import seaborn as sbn\n'), ((5084, 5121), 'numpy.multiply', 'np.multiply', (['(1 - p)', '(observations == 0)'], {}), '(1 - p, observations == 0)\n', (5095, 5121), True, 'import numpy as np\n'), ((6606, 6640), 'os.path.join', 'os.path.join', (['model_path', 'model_id'], {}), '(model_path, model_id)\n', (6618, 6640), False, 'import os\n'), ((7073, 7107), 'os.path.join', 'os.path.join', (['model_path', 'model_id'], {}), '(model_path, model_id)\n', (7085, 7107), False, 'import os\n'), ((896, 921), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(20)'], {}), '(0.1, 1.0, 20)\n', (907, 921), True, 'import numpy as np\n'), ((1801, 1817), 'numpy.log', 'np.log', (['observed'], {}), '(observed)\n', (1807, 1817), True, 'import numpy as np\n'), ((2286, 2301), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (2299, 2301), False, 'from sklearn.neighbors import KernelDensity\n'), ((2656, 2676), 'numpy.log', 'np.log', (['(x + self.eps)'], {}), '(x + self.eps)\n', (2662, 2676), True, 'import numpy as np\n'), ((2678, 2698), 'numpy.log', 'np.log', (['(y + self.eps)'], {}), '(y + self.eps)\n', (2684, 2698), True, 'import numpy as np\n'), ((2810, 2897), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""Logistic model couldn\'t fit, because the number of classes is <2"""'], {}), '(\n "Logistic model couldn\'t fit, because the number of classes is <2")\n', (2824, 2897), False, 'from sklearn.exceptions import NotFittedError\n'), ((3338, 3381), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': "param['bandwidth']"}), "(bandwidth=param['bandwidth'])\n", (3351, 3381), False, 'from sklearn.neighbors import KernelDensity\n'), ((3981, 4025), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""Call fit before prediction"""'], {}), "('Call fit before prediction')\n", (3995, 4025), False, 'from sklearn.exceptions import NotFittedError\n'), ((4136, 4156), 'numpy.log', 'np.log', (['(x + self.eps)'], {}), '(x + self.eps)\n', (4142, 4156), True, 'import numpy as np\n'), ((5219, 5250), 'numpy.log', 'np.log', (['(observations + self.eps)'], {}), '(observations + self.eps)\n', (5225, 5250), True, 'import numpy as np\n'), ((5368, 5400), 'numpy.multiply', 'np.multiply', (['p', 'residual_density'], {}), '(p, residual_density)\n', (5379, 5400), True, 'import numpy as np\n'), ((5631, 5699), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""Fit method should be called before save operation."""'], {}), "('Fit method should be called before save operation.')\n", (5645, 5699), False, 'from sklearn.exceptions import NotFittedError\n'), ((6656, 6685), 'os.path.exists', 'os.path.exists', (['current_model'], {}), '(current_model)\n', (6670, 6685), False, 'import os\n'), ((6699, 6725), 'os.makedirs', 'os.makedirs', (['current_model'], {}), '(current_model)\n', (6710, 6725), False, 'import os\n'), ((6756, 6799), 'os.path.join', 'os.path.join', (['current_model', '"""kde_model.pk"""'], {}), "(current_model, 'kde_model.pk')\n", (6768, 6799), False, 'import os\n'), ((6838, 6884), 'os.path.join', 'os.path.join', (['current_model', '"""linear_model.pk"""'], {}), "(current_model, 'linear_model.pk')\n", (6850, 6884), False, 'import os\n'), ((6920, 6968), 'os.path.join', 'os.path.join', (['current_model', '"""logistic_model.pk"""'], {}), "(current_model, 'logistic_model.pk')\n", (6932, 6968), False, 'import os\n'), ((7209, 7237), 'os.path.exists', 'os.path.exists', (['loaded_model'], {}), '(loaded_model)\n', (7223, 7237), False, 'import os\n'), ((7344, 7389), 'os.path.join', 'os.path.join', (['loaded_model', '"""linear_model.pk"""'], {}), "(loaded_model, 'linear_model.pk')\n", (7356, 7389), False, 'import os\n'), ((7417, 7459), 'os.path.join', 'os.path.join', (['loaded_model', '"""kde_model.pk"""'], {}), "(loaded_model, 'kde_model.pk')\n", (7429, 7459), False, 'import os\n'), ((7491, 7538), 'os.path.join', 'os.path.join', (['loaded_model', '"""logistic_model.pk"""'], {}), "(loaded_model, 'logistic_model.pk')\n", (7503, 7538), False, 'import os\n'), ((2316, 2341), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(20)'], {}), '(0.1, 1.0, 20)\n', (2327, 2341), True, 'import numpy as np\n'), ((5545, 5559), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (5551, 5559), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# --1.2.1
class one_conv(nn.Module):
def __init__(self, in_ch, out_ch, normaliz=False):
super(one_conv, self).__init__()
ops = []
ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)]
# ops += [nn.Dropout(p=0.1)]
if normaliz:
ops += [nn.BatchNorm2d(out_ch)]
ops += [nn.ReLU(inplace=True)]
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
# --1.2.2
class double_conv(nn.Module):
def __init__(self, in_ch, out_ch, normaliz=False):
super(double_conv, self).__init__()
ops = []
ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)]
# ops += [nn.Dropout(p=0.1)]
if normaliz:
ops += [nn.BatchNorm2d(out_ch)]
ops += [nn.ReLU(inplace=True)]
ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)]
# ops += [nn.Dropout(p=0.1)]
if normaliz:
ops += [nn.BatchNorm2d(out_ch)]
ops += [nn.ReLU(inplace=True)]
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
# --1.2.3
class three_conv(nn.Module):
def __init__(self, in_ch, out_ch, normaliz=False):
super(three_conv, self).__init__()
ops = []
ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)]
# ops += [nn.Dropout(p=0.1)]
if normaliz:
ops += [nn.BatchNorm2d(out_ch)]
ops += [nn.ReLU(inplace=True)]
ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)]
# ops += [nn.Dropout(p=0.1)]
if normaliz:
ops += [nn.BatchNorm2d(out_ch)]
ops += [nn.ReLU(inplace=True)]
ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)]
# ops += [nn.Dropout(p=0.1)]
if normaliz:
ops += [nn.BatchNorm2d(out_ch)]
ops += [nn.ReLU(inplace=True)]
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class resconv2(nn.Module):
def __init__(self,in_ch,out_ch,ksize=3,kstride=1,kpad=1):
super(resconv2,self).__init__()
self.conv1 = nn.Conv2d(in_ch,out_ch,ksize,stride=kstride,padding=kpad)
self.conv2 = nn.Conv2d(out_ch,out_ch,ksize,stride=kstride,padding=kpad)
if in_ch != out_ch:
self.red = nn.Conv2d(in_ch,out_ch,(1,1),stride=1,padding=0)
else:
self.red = None
def forward(self,x):
rx = self.conv1(x)
rx = F.relu(rx)
rx= self.conv2(rx)
rx = F.relu(rx)
if self.red!=None:
x = self.red(x)+rx
else:
x = x + rx
return rx
class up_res(nn.Module):
def __init__(self, up_in_ch, up_out_ch,cat_in_ch, cat_out_ch,if_convt=False):
super(up_res, self).__init__()
self.if_convt = if_convt
if self.if_convt:
self.up = nn.ConvTranspose2d(up_in_ch,up_out_ch, 2, stride=2)
else:
self.up = nn.Upsample(scale_factor=2,
mode='bilinear',
align_corners=False)
self.conv1 = nn.Conv2d(up_in_ch,up_out_ch,(3,3))
self.conv2 = resconv2(cat_in_ch,cat_out_ch)
def forward(self, x1, x2):
if self.if_convt:
x1 = self.up(x1)
else:
x1 = self.up(x1)
x1 = self.conv1(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
#pad to make up for the loss when downsampling
x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)),
diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5
x = torch.cat([x2, x1], dim=1)
del x2,x1
x = self.conv2(x)
return x
# --1.3.1
class up(nn.Module):
def __init__(self, up_in_ch, up_out_ch,cat_in_ch, cat_out_ch,if_convt=False):
super(up, self).__init__()
self.if_convt = if_convt
if self.if_convt:
self.up = nn.ConvTranspose2d(up_in_ch,up_out_ch, 2, stride=2)
else:
self.up = nn.Upsample(scale_factor=2,
mode='bilinear',
align_corners=False)
self.conv1 = one_conv(up_in_ch,up_out_ch)
self.conv2 = double_conv(cat_in_ch, cat_out_ch)
def forward(self, x1, x2):
if self.if_convt:
x1 = self.up(x1)
else:
x1 = self.up(x1)
x1 = self.conv1(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
#pad to make up for the loss when downsampling
x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)),
diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5
x = torch.cat([x2, x1], dim=1)
del x2,x1
x = self.conv2(x)
return x
# --1.3.2
class upcat(nn.Module):
def __init__(self, up_in_ch, up_out_ch,if_convt=False):
super(upcat, self).__init__()
self.if_convt = if_convt
if self.if_convt:
self.up = nn.ConvTranspose2d(up_in_ch, up_out_ch, 2, stride=2)
else:
self.up = nn.Upsample(scale_factor=2,
mode='bilinear',
align_corners=False)
self.conv1 = one_conv(up_in_ch,up_out_ch)
def forward(self, x1, x2):
if self.if_convt:
x1 = self.up(x1)
else:
x1 = self.up(x1)
x1 = self.conv1(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
#pad to make up for the loss when downsampling
x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)),
diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5
x = torch.cat([x2, x1], dim=1)
del x2,x1
return x
# --1.4
def change_padding(net,del_or_add='del',pad_size=(1,1)):
for m in net.modules():
if isinstance(m,nn.Conv2d):
m.padding = (0,0) if del_or_add =='del' else pad_size
return net
# --1.5 can only compute linear
def compute_rf(net):
rf_size,rf_pad,rf_stride = 1,0,1
for m in net.modules():
if isinstance(m,(nn.Conv2d,nn.MaxPool2d)):
tmp_kernel_size = m.kernel_size[0] if isinstance(m.kernel_size,(tuple,list)) else m.kernel_size
tmp_padding = m.padding[0] if isinstance(m.padding,(tuple,list)) else m.padding
tmp_stride = m.stride[0] if isinstance(m.stride,(tuple,list)) else m.stride
# rf_pad relates with the last layer's rf_stride
rf_pad += tmp_padding*rf_stride
# rf_size relates with the last layers's rf_stride
rf_size += (tmp_kernel_size-1)*rf_stride
rf_stride *= tmp_stride
return {'rf_size':rf_size,'rf_pad':rf_pad,'rf_stride':rf_stride} | [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"math.ceil",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Upsample",
"torch.nn.functional.relu",
"torch.nn.ConvTranspose2d",
"torch.cat"
] | [((466, 485), 'torch.nn.Sequential', 'nn.Sequential', (['*ops'], {}), '(*ops)\n', (479, 485), True, 'import torch.nn as nn\n'), ((1156, 1175), 'torch.nn.Sequential', 'nn.Sequential', (['*ops'], {}), '(*ops)\n', (1169, 1175), True, 'import torch.nn as nn\n'), ((2063, 2082), 'torch.nn.Sequential', 'nn.Sequential', (['*ops'], {}), '(*ops)\n', (2076, 2082), True, 'import torch.nn as nn\n'), ((2314, 2375), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_ch', 'out_ch', 'ksize'], {'stride': 'kstride', 'padding': 'kpad'}), '(in_ch, out_ch, ksize, stride=kstride, padding=kpad)\n', (2323, 2375), True, 'import torch.nn as nn\n'), ((2394, 2456), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_ch', 'out_ch', 'ksize'], {'stride': 'kstride', 'padding': 'kpad'}), '(out_ch, out_ch, ksize, stride=kstride, padding=kpad)\n', (2403, 2456), True, 'import torch.nn as nn\n'), ((2670, 2680), 'torch.nn.functional.relu', 'F.relu', (['rx'], {}), '(rx)\n', (2676, 2680), True, 'import torch.nn.functional as F\n'), ((2723, 2733), 'torch.nn.functional.relu', 'F.relu', (['rx'], {}), '(rx)\n', (2729, 2733), True, 'import torch.nn.functional as F\n'), ((3927, 3953), 'torch.cat', 'torch.cat', (['[x2, x1]'], {'dim': '(1)'}), '([x2, x1], dim=1)\n', (3936, 3953), False, 'import torch\n'), ((5091, 5117), 'torch.cat', 'torch.cat', (['[x2, x1]'], {'dim': '(1)'}), '([x2, x1], dim=1)\n', (5100, 5117), False, 'import torch\n'), ((6179, 6205), 'torch.cat', 'torch.cat', (['[x2, x1]'], {'dim': '(1)'}), '([x2, x1], dim=1)\n', (6188, 6205), False, 'import torch\n'), ((258, 296), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(in_ch, out_ch, 3, padding=1)\n', (267, 296), True, 'import torch.nn as nn\n'), ((420, 441), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (427, 441), True, 'import torch.nn as nn\n'), ((745, 783), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(in_ch, out_ch, 3, padding=1)\n', (754, 783), True, 'import torch.nn as nn\n'), ((907, 928), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (914, 928), True, 'import torch.nn as nn\n'), ((947, 986), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(out_ch, out_ch, 3, padding=1)\n', (956, 986), True, 'import torch.nn as nn\n'), ((1110, 1131), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1117, 1131), True, 'import torch.nn as nn\n'), ((1429, 1467), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(in_ch, out_ch, 3, padding=1)\n', (1438, 1467), True, 'import torch.nn as nn\n'), ((1591, 1612), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1598, 1612), True, 'import torch.nn as nn\n'), ((1641, 1680), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(out_ch, out_ch, 3, padding=1)\n', (1650, 1680), True, 'import torch.nn as nn\n'), ((1804, 1825), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1811, 1825), True, 'import torch.nn as nn\n'), ((1854, 1893), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(out_ch, out_ch, 3, padding=1)\n', (1863, 1893), True, 'import torch.nn as nn\n'), ((2017, 2038), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2024, 2038), True, 'import torch.nn as nn\n'), ((2507, 2560), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_ch', 'out_ch', '(1, 1)'], {'stride': '(1)', 'padding': '(0)'}), '(in_ch, out_ch, (1, 1), stride=1, padding=0)\n', (2516, 2560), True, 'import torch.nn as nn\n'), ((3089, 3141), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['up_in_ch', 'up_out_ch', '(2)'], {'stride': '(2)'}), '(up_in_ch, up_out_ch, 2, stride=2)\n', (3107, 3141), True, 'import torch.nn as nn\n'), ((3179, 3244), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(scale_factor=2, mode='bilinear', align_corners=False)\n", (3190, 3244), True, 'import torch.nn as nn\n'), ((3339, 3377), 'torch.nn.Conv2d', 'nn.Conv2d', (['up_in_ch', 'up_out_ch', '(3, 3)'], {}), '(up_in_ch, up_out_ch, (3, 3))\n', (3348, 3377), True, 'import torch.nn as nn\n'), ((4258, 4310), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['up_in_ch', 'up_out_ch', '(2)'], {'stride': '(2)'}), '(up_in_ch, up_out_ch, 2, stride=2)\n', (4276, 4310), True, 'import torch.nn as nn\n'), ((4348, 4413), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(scale_factor=2, mode='bilinear', align_corners=False)\n", (4359, 4413), True, 'import torch.nn as nn\n'), ((5404, 5456), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['up_in_ch', 'up_out_ch', '(2)'], {'stride': '(2)'}), '(up_in_ch, up_out_ch, 2, stride=2)\n', (5422, 5456), True, 'import torch.nn as nn\n'), ((5495, 5560), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(scale_factor=2, mode='bilinear', align_corners=False)\n", (5506, 5560), True, 'import torch.nn as nn\n'), ((379, 401), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (393, 401), True, 'import torch.nn as nn\n'), ((866, 888), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (880, 888), True, 'import torch.nn as nn\n'), ((1069, 1091), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (1083, 1091), True, 'import torch.nn as nn\n'), ((1550, 1572), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (1564, 1572), True, 'import torch.nn as nn\n'), ((1763, 1785), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (1777, 1785), True, 'import torch.nn as nn\n'), ((1976, 1998), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (1990, 1998), True, 'import torch.nn as nn\n'), ((3807, 3829), 'math.ceil', 'math.ceil', (['(diffX / 2.0)'], {}), '(diffX / 2.0)\n', (3816, 3829), False, 'import math\n'), ((3873, 3895), 'math.ceil', 'math.ceil', (['(diffY / 2.0)'], {}), '(diffY / 2.0)\n', (3882, 3895), False, 'import math\n'), ((4971, 4993), 'math.ceil', 'math.ceil', (['(diffX / 2.0)'], {}), '(diffX / 2.0)\n', (4980, 4993), False, 'import math\n'), ((5037, 5059), 'math.ceil', 'math.ceil', (['(diffY / 2.0)'], {}), '(diffY / 2.0)\n', (5046, 5059), False, 'import math\n'), ((6059, 6081), 'math.ceil', 'math.ceil', (['(diffX / 2.0)'], {}), '(diffX / 2.0)\n', (6068, 6081), False, 'import math\n'), ((6125, 6147), 'math.ceil', 'math.ceil', (['(diffY / 2.0)'], {}), '(diffY / 2.0)\n', (6134, 6147), False, 'import math\n')] |
"""
=========================================
Robust line model estimation using RANSAC
=========================================
In this example we see how to robustly fit a line model to faulty data using
the RANSAC (random sample consensus) algorithm.
Firstly the data are generated by adding a gaussian noise to a linear function.
Then, the outlier points are added to the data set.
RANSAC iteratively estimates the parameters from the data set.
At each iteration the following steps are performed:
1. Select ``min_samples`` random samples from the original data and check
whether the set of data is valid (see ``is_data_valid`` option).
2. Estimate a model on the random subset
(``model_cls.estimate(*data[random_subset]``) and check whether the
estimated model is valid (see ``is_model_valid`` option).
3. Classify all the data points as either inliers or outliers by calculating
the residuals using the estimated model (``model_cls.residuals(*data)``) -
all data samples with residuals smaller than the ``residual_threshold``
are considered as inliers.
4. If the number of the inlier samples is greater than ever before,
save the estimated model as the best model. In case the current estimated
model has the same number of inliers, it is considered as the best model
only if the sum of residuals is lower.
These steps are performed either a maximum number of times or until one of
the special stop criteria are met. The final model is estimated using all the
inlier samples of the previously determined best model.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage.measure import LineModelND, ransac
np.random.seed(seed=1)
# generate coordinates of line
x = np.arange(-200, 200)
y = 0.2 * x + 20
data = np.column_stack([x, y])
# add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
# add faulty data
faulty = np.array(30 * [(180., -100)])
faulty += 10 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# fit line using all data
model = LineModelND()
model.estimate(data)
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data, LineModelND, min_samples=2,
residual_threshold=1, max_trials=1000)
outliers = inliers == False
# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)
fig, ax = plt.subplots()
ax.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
label='Inlier data')
ax.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
label='Outlier data')
ax.plot(line_x, line_y, '-k', label='Line model from all data')
ax.plot(line_x, line_y_robust, '-b', label='Robust line model')
ax.legend(loc='lower left')
plt.show()
######################################################################
# Now, we generalize this example to 3D points.
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from skimage.measure import LineModelND, ransac
np.random.seed(seed=1)
# generate coordinates of line
point = np.array([0, 0, 0], dtype='float')
direction = np.array([1, 1, 1], dtype='float') / np.sqrt(3)
xyz = point + 10 * np.arange(-100, 100)[..., np.newaxis] * direction
# add gaussian noise to coordinates
noise = np.random.normal(size=xyz.shape)
xyz += 0.5 * noise
xyz[::2] += 20 * noise[::2]
xyz[::4] += 100 * noise[::4]
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(xyz, LineModelND, min_samples=2,
residual_threshold=1, max_trials=1000)
outliers = inliers == False
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xyz[inliers][:, 0], xyz[inliers][:, 1], xyz[inliers][:, 2], c='b',
marker='o', label='Inlier data')
ax.scatter(xyz[outliers][:, 0], xyz[outliers][:, 1], xyz[outliers][:, 2], c='r',
marker='o', label='Outlier data')
ax.legend(loc='lower left')
plt.show()
| [
"numpy.random.normal",
"numpy.sqrt",
"numpy.column_stack",
"skimage.measure.LineModelND",
"numpy.array",
"skimage.measure.ransac",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1667, 1689), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (1681, 1689), True, 'import numpy as np\n'), ((1726, 1746), 'numpy.arange', 'np.arange', (['(-200)', '(200)'], {}), '(-200, 200)\n', (1735, 1746), True, 'import numpy as np\n'), ((1771, 1794), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (1786, 1794), True, 'import numpy as np\n'), ((1840, 1873), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'data.shape'}), '(size=data.shape)\n', (1856, 1873), True, 'import numpy as np\n'), ((1979, 2009), 'numpy.array', 'np.array', (['(30 * [(180.0, -100)])'], {}), '(30 * [(180.0, -100)])\n', (1987, 2009), True, 'import numpy as np\n'), ((2127, 2140), 'skimage.measure.LineModelND', 'LineModelND', ([], {}), '()\n', (2138, 2140), False, 'from skimage.measure import LineModelND, ransac\n'), ((2252, 2331), 'skimage.measure.ransac', 'ransac', (['data', 'LineModelND'], {'min_samples': '(2)', 'residual_threshold': '(1)', 'max_trials': '(1000)'}), '(data, LineModelND, min_samples=2, residual_threshold=1, max_trials=1000)\n', (2258, 2331), False, 'from skimage.measure import LineModelND, ransac\n'), ((2444, 2464), 'numpy.arange', 'np.arange', (['(-250)', '(250)'], {}), '(-250, 250)\n', (2453, 2464), True, 'import numpy as np\n'), ((2556, 2570), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2568, 2570), True, 'from matplotlib import pyplot as plt\n'), ((2910, 2920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2918, 2920), True, 'from matplotlib import pyplot as plt\n'), ((3188, 3210), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (3202, 3210), True, 'import numpy as np\n'), ((3251, 3285), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': '"""float"""'}), "([0, 0, 0], dtype='float')\n", (3259, 3285), True, 'import numpy as np\n'), ((3460, 3492), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xyz.shape'}), '(size=xyz.shape)\n', (3476, 3492), True, 'import numpy as np\n'), ((3659, 3737), 'skimage.measure.ransac', 'ransac', (['xyz', 'LineModelND'], {'min_samples': '(2)', 'residual_threshold': '(1)', 'max_trials': '(1000)'}), '(xyz, LineModelND, min_samples=2, residual_threshold=1, max_trials=1000)\n', (3665, 3737), False, 'from skimage.measure import LineModelND, ransac\n'), ((3804, 3816), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3814, 3816), True, 'from matplotlib import pyplot as plt\n'), ((4136, 4146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4144, 4146), True, 'from matplotlib import pyplot as plt\n'), ((2024, 2059), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'faulty.shape'}), '(size=faulty.shape)\n', (2040, 2059), True, 'import numpy as np\n'), ((3298, 3332), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': '"""float"""'}), "([1, 1, 1], dtype='float')\n", (3306, 3332), True, 'import numpy as np\n'), ((3335, 3345), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3342, 3345), True, 'import numpy as np\n'), ((3365, 3385), 'numpy.arange', 'np.arange', (['(-100)', '(100)'], {}), '(-100, 100)\n', (3374, 3385), True, 'import numpy as np\n')] |
from flask import Flask, request, jsonify
import os, sys
import helpers, escape_helpers
import logging
import config
from rdflib.namespace import Namespace
##############
# INIT CONFIG
##############
CONFIG = config.load_config(os.environ.get('ENVIRONMENT', "DEBUG"))
app = Flask(__name__)
handler = logging.StreamHandler(stream=sys.stderr)
handler.setLevel(CONFIG["LOG_LEVEL"])
app.logger.addHandler(handler)
# vocabularies
mu = Namespace('http://mu.semte.ch/vocabularies/')
mu_core = Namespace('http://mu.semte.ch/vocabularies/core/')
mu_ext = Namespace('http://mu.semte.ch/vocabularies/ext/')
graph = CONFIG['MU_APPLICATION_GRAPH']
# sparql wrapper
sparql_wrapper = helpers.init_sparql_wrapper(CONFIG)
#################
# API
#################
@app.route('/shopping-carts', methods=["PATCH"])
def associate_cart():
"""
associates cart with mu-session
:return:
"""
# validates
data = request.get_json(force=True)['data']
errors = [helpers.validate_resource_type("shopping-carts", data), helpers.validate_json_api_content_type(request)]
if any(errors):
return next(e for e in errors if e)
cart_id = data.get("id", None)
if not cart_id:
return helpers.error("CART ID missing")
# get session id in the request
session_id = helpers.session_id_header(request)
if not session_id:
return helpers.error("MU_SESSION_ID missing")
# now fetch the cart
query = """SELECT ?cart
WHERE{
GRAPH <http://mu.semte.ch/application> {
?cart <http://mu.semte.ch/vocabularies/core/uuid> %s
}
}
""" % escape_helpers.sparql_escape(cart_id)
carts = helpers.query(app.logger, sparql_wrapper["sparql_query"], query).get('results', []).get('bindings')
if not len(carts) == 1:
return helpers.error("no/too many cart(s) found for {}".format(cart_id))
cart_uri = carts[0]['cart']['value']
# update the cart with session
query = """
PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
INSERT DATA
{
GRAPH <http://mu.semte.ch/application> {
<%s> ext:ownerSession <%s>.
}
}
""" % (cart_uri, session_id)
helpers.query(app.logger, sparql_wrapper["sparql_update"], query)
return "", 204
@app.route('/shopping-carts')
def return_associate_cart():
# get session id in the request
session_id = helpers.session_id_header(request)
if not session_id:
return helpers.error("MU_SESSION_ID missing")
# now fetch the carts uid
query = """PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
SELECT ?uid
WHERE {
GRAPH <http://mu.semte.ch/application> {
?uri <http://mu.semte.ch/vocabularies/ext/ownerSession> <%s>.
?uri mu:uuid ?uid
}
}
""" % session_id
uids = helpers.query(app.logger, sparql_wrapper["sparql_query"], query).get('results', []).get('bindings')
return jsonify([e["uid"]["value"] for e in uids]), 200
#######################
## Start Application ##
#######################
if __name__ == '__main__':
app.logger.info("---cart-service is starting")
app.run(host='0.0.0.0', port=80, debug=True) | [
"helpers.session_id_header",
"logging.StreamHandler",
"helpers.query",
"flask.Flask",
"os.environ.get",
"rdflib.namespace.Namespace",
"escape_helpers.sparql_escape",
"flask.request.get_json",
"helpers.validate_json_api_content_type",
"helpers.validate_resource_type",
"helpers.error",
"helpers.... | [((276, 291), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'from flask import Flask, request, jsonify\n'), ((303, 343), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stderr'}), '(stream=sys.stderr)\n', (324, 343), False, 'import logging\n'), ((434, 479), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://mu.semte.ch/vocabularies/"""'], {}), "('http://mu.semte.ch/vocabularies/')\n", (443, 479), False, 'from rdflib.namespace import Namespace\n'), ((490, 540), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://mu.semte.ch/vocabularies/core/"""'], {}), "('http://mu.semte.ch/vocabularies/core/')\n", (499, 540), False, 'from rdflib.namespace import Namespace\n'), ((550, 599), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://mu.semte.ch/vocabularies/ext/"""'], {}), "('http://mu.semte.ch/vocabularies/ext/')\n", (559, 599), False, 'from rdflib.namespace import Namespace\n'), ((675, 710), 'helpers.init_sparql_wrapper', 'helpers.init_sparql_wrapper', (['CONFIG'], {}), '(CONFIG)\n', (702, 710), False, 'import helpers, escape_helpers\n'), ((230, 268), 'os.environ.get', 'os.environ.get', (['"""ENVIRONMENT"""', '"""DEBUG"""'], {}), "('ENVIRONMENT', 'DEBUG')\n", (244, 268), False, 'import os, sys\n'), ((1298, 1332), 'helpers.session_id_header', 'helpers.session_id_header', (['request'], {}), '(request)\n', (1323, 1332), False, 'import helpers, escape_helpers\n'), ((2246, 2311), 'helpers.query', 'helpers.query', (['app.logger', "sparql_wrapper['sparql_update']", 'query'], {}), "(app.logger, sparql_wrapper['sparql_update'], query)\n", (2259, 2311), False, 'import helpers, escape_helpers\n'), ((2446, 2480), 'helpers.session_id_header', 'helpers.session_id_header', (['request'], {}), '(request)\n', (2471, 2480), False, 'import helpers, escape_helpers\n'), ((918, 946), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (934, 946), False, 'from flask import Flask, request, jsonify\n'), ((970, 1024), 'helpers.validate_resource_type', 'helpers.validate_resource_type', (['"""shopping-carts"""', 'data'], {}), "('shopping-carts', data)\n", (1000, 1024), False, 'import helpers, escape_helpers\n'), ((1026, 1073), 'helpers.validate_json_api_content_type', 'helpers.validate_json_api_content_type', (['request'], {}), '(request)\n', (1064, 1073), False, 'import helpers, escape_helpers\n'), ((1211, 1243), 'helpers.error', 'helpers.error', (['"""CART ID missing"""'], {}), "('CART ID missing')\n", (1224, 1243), False, 'import helpers, escape_helpers\n'), ((1372, 1410), 'helpers.error', 'helpers.error', (['"""MU_SESSION_ID missing"""'], {}), "('MU_SESSION_ID missing')\n", (1385, 1410), False, 'import helpers, escape_helpers\n'), ((1624, 1661), 'escape_helpers.sparql_escape', 'escape_helpers.sparql_escape', (['cart_id'], {}), '(cart_id)\n', (1652, 1661), False, 'import helpers, escape_helpers\n'), ((2520, 2558), 'helpers.error', 'helpers.error', (['"""MU_SESSION_ID missing"""'], {}), "('MU_SESSION_ID missing')\n", (2533, 2558), False, 'import helpers, escape_helpers\n'), ((2989, 3031), 'flask.jsonify', 'jsonify', (["[e['uid']['value'] for e in uids]"], {}), "([e['uid']['value'] for e in uids])\n", (2996, 3031), False, 'from flask import Flask, request, jsonify\n'), ((1675, 1739), 'helpers.query', 'helpers.query', (['app.logger', "sparql_wrapper['sparql_query']", 'query'], {}), "(app.logger, sparql_wrapper['sparql_query'], query)\n", (1688, 1739), False, 'import helpers, escape_helpers\n'), ((2878, 2942), 'helpers.query', 'helpers.query', (['app.logger', "sparql_wrapper['sparql_query']", 'query'], {}), "(app.logger, sparql_wrapper['sparql_query'], query)\n", (2891, 2942), False, 'import helpers, escape_helpers\n')] |
"""
This file illustrates a few examples of using pgmock with pytest.
A postgres testing database from pytest-pgsql (https://github.com/CloverHealth/pytest-pgsql)
is used and a fixture is created for using the mock context manager. This is the
preferred way of using pgmock, but it's also possible to render SQL yourself and execute
patched versions of it. Examples of this are also included here
"""
import pytest
import pgmock
@pytest.fixture
def pgmocker(transacted_postgresql_db):
with pgmock.mock(transacted_postgresql_db.connection) as mocker:
yield mocker
def test_table_patching_w_mocker(transacted_postgresql_db, pgmocker):
"""Tests patching a table while using the mocker returned by ``pgmock.mock``"""
pgmocker.patch(pgmock.table('test_table'), [('val1', 'val2'), ('val3', 'val4')], ['c1', 'c2'])
results = list(transacted_postgresql_db.connection.execute('SELECT * from test_table'))
assert results == [('val1', 'val2'), ('val3', 'val4')]
def test_patch_subquery_from_file(transacted_postgresql_db, tmpdir):
"""Tests reading a subquery from a file and testing a patched version of it"""
# Create the example file
file_name = tmpdir.join('file.sql')
file_name.write('SELECT sub.c1, sub.c2 FROM (SELECT * FROM test_table) sub;')
# Read the subquery 'sub' from the file
subquery = pgmock.sql_file(str(file_name), pgmock.subquery('sub'))
assert subquery == 'SELECT * FROM test_table'
# Patch the table of the subquery and verify it returns the proper results
patched = pgmock.sql(subquery, pgmock.patch(
pgmock.table('test_table'),
rows=[('v1', 'v2'), ('v3', 'v4')],
cols=['c1', 'c2']
))
assert (
patched == "SELECT * FROM (VALUES ('v1','v2'),('v3','v4')) AS test_table(\"c1\",\"c2\")"
)
# Patches can also be applied with list of dictionaries, filling in only what's needed.
# Column names must still be provided. null values will be filled for all missing columns
patched = pgmock.sql(subquery, pgmock.patch(
pgmock.table('test_table'),
rows=[{'c1': 'v1'}, {'c2': 'v4'}],
cols=['c1', 'c2']
))
assert (
patched == "SELECT * FROM (VALUES ('v1',null),(null,'v4')) AS test_table(\"c1\",\"c2\")"
)
results = list(transacted_postgresql_db.connection.execute(patched))
assert results == [('v1', None), (None, 'v4')]
| [
"pgmock.table",
"pgmock.subquery",
"pgmock.mock"
] | [((498, 546), 'pgmock.mock', 'pgmock.mock', (['transacted_postgresql_db.connection'], {}), '(transacted_postgresql_db.connection)\n', (509, 546), False, 'import pgmock\n'), ((754, 780), 'pgmock.table', 'pgmock.table', (['"""test_table"""'], {}), "('test_table')\n", (766, 780), False, 'import pgmock\n'), ((1384, 1406), 'pgmock.subquery', 'pgmock.subquery', (['"""sub"""'], {}), "('sub')\n", (1399, 1406), False, 'import pgmock\n'), ((1595, 1621), 'pgmock.table', 'pgmock.table', (['"""test_table"""'], {}), "('test_table')\n", (1607, 1621), False, 'import pgmock\n'), ((2060, 2086), 'pgmock.table', 'pgmock.table', (['"""test_table"""'], {}), "('test_table')\n", (2072, 2086), False, 'import pgmock\n')] |
# -*- coding: utf-8 -*-
__author__ = '<NAME> <<EMAIL>>'
import json
import BaseHTTPServer
import threading
from urlparse import parse_qs, urlparse
import status
callbacks = dict()
class JsonHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
print("path:", self.path)
if self.path == '/status.json':
data = {
'api' : '0.13',
'space' : 'Stratum 0',
'logo' : 'https:\/\/stratum0.org\/mediawiki\/images\/thumb\/c\/c6\/Sanduhr-twitter-avatar-black.svg\/240px-Sanduhr-twitter-avatar-black.svg.png',
'url': 'https:\/\/stratum0.org',
'location' : {
'address': 'Hamburger Strasse 273a, 38114 Braunschweig, Germany',
'lon' : 10.5211247,
'lat' : 52.2785658
},
'state' : {
'open' : status.space['open'],
'lastchange' : status.space['last_change'],
'trigger_person' : status.space['by'],
'icon' : {
'open' : 'http:\/\/status.stratum0.org\/open_square.png',
'closed' : 'http:\/\/status.stratum0.org\/closed_square.png'
},
'ext_since' : status.space['since']
},
'contact' : {
'phone' : '+4953128769245',
'twitter' : '@stratum0',
'ml' : '<EMAIL>',
'issue-mail' : 'cm9oaWViK3NwYWNlYXBpLWlzc3Vlc0Byb2hpZWIubmFtZQ==',
'irc' : 'irc:\/\/irc.freenode.net\/#stratum0'
},
'issue_report_channels' : [
'issue-mail'
]
}
data_string = json.dumps(data)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(data_string)
self.wfile.write('\n')
elif self.path.startswith('/update?'):
queryurl = urlparse(self.path)
params = parse_qs(queryurl.query)
if len(params) > 0:
by = ''
if 'by' in params:
by = params['by'][0]
status.update(params['open'][0]=='true', by)
callbacks['send_status']()
self.send_response(200)
else:
self.send_response(400)
else:
self.send_response(404)
class JsonServer(threading.Thread):
def __init__(self, address):
super(JsonServer, self).__init__()
self.address = address
self.stop_requested = False
def run(self):
self.httpd = BaseHTTPServer.HTTPServer(self.address, JsonHandler)
while not self.stop_requested:
self.httpd.handle_request()
def stop(self):
self.stop_requested = True
| [
"urlparse.parse_qs",
"status.update",
"json.dumps",
"BaseHTTPServer.HTTPServer",
"urlparse.urlparse"
] | [((2795, 2847), 'BaseHTTPServer.HTTPServer', 'BaseHTTPServer.HTTPServer', (['self.address', 'JsonHandler'], {}), '(self.address, JsonHandler)\n', (2820, 2847), False, 'import BaseHTTPServer\n'), ((1823, 1839), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1833, 1839), False, 'import json\n'), ((2119, 2138), 'urlparse.urlparse', 'urlparse', (['self.path'], {}), '(self.path)\n', (2127, 2138), False, 'from urlparse import parse_qs, urlparse\n'), ((2160, 2184), 'urlparse.parse_qs', 'parse_qs', (['queryurl.query'], {}), '(queryurl.query)\n', (2168, 2184), False, 'from urlparse import parse_qs, urlparse\n'), ((2335, 2381), 'status.update', 'status.update', (["(params['open'][0] == 'true')", 'by'], {}), "(params['open'][0] == 'true', by)\n", (2348, 2381), False, 'import status\n')] |
from django.contrib.auth.models import Permission, Group
from rest_framework import viewsets, mixins, response, status
from rest_framework.generics import get_object_or_404
from .serializer import PermissionSerializer
from .common import get_permission_obj
from .filter import PermissionFilter
class PermissionsViewset(viewsets.ReadOnlyModelViewSet):
"""
权限列表 视图类
list:
返回permission列表
"""
queryset = Permission.objects.all()
serializer_class = PermissionSerializer
filter_class = PermissionFilter
filter_fields = ("name",)
def get_queryset(self):
queryset = super(PermissionsViewset, self).get_queryset()
queryset = queryset.order_by("content_type__id")
return queryset
class GroupPermissionsViewset(viewsets.ReadOnlyModelViewSet,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
"""
用户组权限
retrieve:
返回用户组的权限列表
update:
给指定用户组增加权限,参数pid: permission id
destroy:
删除指定组下的权限,参数pid: permission id
"""
queryset = Permission.objects.all()
serializer_class = PermissionSerializer
filter_class = PermissionFilter
filter_fields = ("name",)
def process_permission(self, group_permission_queryset, data):
for record in data:
try:
group_permission_queryset.get(pk=record.get("id", None))
record["status"] = True
except Exception:
pass
return data
def get_group_permissions(self):
groupobj = self.get_object()
queryset = groupobj.permissions.all()
queryset = self.filter_queryset(queryset)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return response.Response(serializer.data)
def get_modify_permissions(self):
groupobj = self.get_object()
group_permission_queryset = groupobj.permissions.all()
queryset = Permission.objects.all()
queryset = self.filter_queryset(queryset)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(self.process_permission(group_permission_queryset, serializer.data))
serializer = self.get_serializer(queryset, many=True)
return response.Response(self.process_permission(group_permission_queryset, serializer.data))
def retrieve(self, request, *args, **kwargs):
modify = request.GET.get("modify", None)
if modify is not None:
return self.get_modify_permissions()
else:
return self.get_group_permissions()
def get_object(self):
queryset = Group.objects.all()
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
def update(self, request, *args, **kwargs):
ret = {"status": 0}
groupobj = self.get_object()
permission_obj = get_permission_obj(request.data.get("pid", ""))
if permission_obj is None:
ret["status"] = 1
ret["errmsg"] = "permission 不存在"
else:
groupobj.permissions.add(permission_obj)
return response.Response(ret, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
ret = {"status": 0}
groupobj = self.get_object()
permission_obj = get_permission_obj(request.data.get("pid", ""))
if permission_obj is None:
ret["status"] = 1
ret["errmsg"] = "permission 不存在"
else:
groupobj.permissions.remove(permission_obj)
return response.Response(ret, status=status.HTTP_200_OK) | [
"django.contrib.auth.models.Permission.objects.all",
"rest_framework.response.Response",
"rest_framework.generics.get_object_or_404",
"django.contrib.auth.models.Group.objects.all"
] | [((428, 452), 'django.contrib.auth.models.Permission.objects.all', 'Permission.objects.all', ([], {}), '()\n', (450, 452), False, 'from django.contrib.auth.models import Permission, Group\n'), ((1081, 1105), 'django.contrib.auth.models.Permission.objects.all', 'Permission.objects.all', ([], {}), '()\n', (1103, 1105), False, 'from django.contrib.auth.models import Permission, Group\n'), ((1965, 1999), 'rest_framework.response.Response', 'response.Response', (['serializer.data'], {}), '(serializer.data)\n', (1982, 1999), False, 'from rest_framework import viewsets, mixins, response, status\n'), ((2158, 2182), 'django.contrib.auth.models.Permission.objects.all', 'Permission.objects.all', ([], {}), '()\n', (2180, 2182), False, 'from django.contrib.auth.models import Permission, Group\n'), ((2941, 2960), 'django.contrib.auth.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (2958, 2960), False, 'from django.contrib.auth.models import Permission, Group\n'), ((3433, 3477), 'rest_framework.generics.get_object_or_404', 'get_object_or_404', (['queryset'], {}), '(queryset, **filter_kwargs)\n', (3450, 3477), False, 'from rest_framework.generics import get_object_or_404\n'), ((3974, 4023), 'rest_framework.response.Response', 'response.Response', (['ret'], {'status': 'status.HTTP_200_OK'}), '(ret, status=status.HTTP_200_OK)\n', (3991, 4023), False, 'from rest_framework import viewsets, mixins, response, status\n'), ((4407, 4456), 'rest_framework.response.Response', 'response.Response', (['ret'], {'status': 'status.HTTP_200_OK'}), '(ret, status=status.HTTP_200_OK)\n', (4424, 4456), False, 'from rest_framework import viewsets, mixins, response, status\n')] |
'''
Module with helper classes to create new persistables
'''
from abc import ABCMeta, abstractmethod
from simpleml.persistables.meta_registry import SIMPLEML_REGISTRY
from simpleml.datasets.base_dataset import Dataset
from simpleml.pipelines.base_pipeline import Pipeline
from simpleml.models.base_model import Model
from simpleml.metrics.base_metric import Metric
from simpleml.utils.errors import TrainingError
import logging
from future.utils import with_metaclass
LOGGER = logging.getLogger(__name__)
__author__ = '<NAME>'
class PersistableCreator(with_metaclass(ABCMeta, object)):
@classmethod
def retrieve_or_create(self, **kwargs):
'''
Wrapper method to first attempt to retrieve a matching persistable and
then create a new one if it isn't found
'''
cls, filters = self.determine_filters(**kwargs)
persistable = self.retrieve(cls, filters)
if persistable is not None:
LOGGER.info('Using existing persistable: {}, {}, {}'.format(cls.__tablename__, persistable.name, persistable.version))
persistable.load()
return persistable
else:
LOGGER.info('Existing {} not found. Creating new one now'.format(cls.__tablename__))
persistable = self.create(**kwargs)
LOGGER.info('Using new persistable: {}, {}, {}'.format(cls.__tablename__, persistable.name, persistable.version))
return persistable
@staticmethod
def retrieve(cls, filters):
'''
Query database using the table model (cls) and filters for a matching
persistable
'''
return cls.where(**filters).order_by(cls.version.desc()).first()
@staticmethod
def retrieve_dependency(dependency_cls, **dependency_kwargs):
'''
Base method to query for dependency
Raises TrainingError if dependency does not exist
'''
dependency = dependency_cls.retrieve(
*dependency_cls.determine_filters(**dependency_kwargs))
if dependency is None:
raise TrainingError('Expected dependency is missing')
dependency.load()
return dependency
@abstractmethod
def determine_filters(cls, strict=False, **kwargs):
'''
method to determine which filters to apply when looking for
existing persistable
:param strict: whether to fit objects first before assuming they are identical
In theory if all inputs and classes are the same, the outputs should deterministically
be the same as well (up to random iter). So, you dont need to fit objects
to be sure they are the same
Default design iterates through 2 (or 3) options when retrieving persistables:
1) By name and version (unique properties that define persistables)
2) By name, registered_name, and computed hash
2.5) Optionally, just use name and registered_name (assumes class
definition is the same and would result in an identical persistable)
Returns: database class, filter dictionary
'''
@abstractmethod
def create(cls, **kwargs):
'''
method to create a new persistable with the desired parameters
kwargs are passed directly to persistable
'''
@staticmethod
def retrieve_from_registry(registered_name):
'''
stateless method to query registry for class definitions. handles errors
'''
cls = SIMPLEML_REGISTRY.get(registered_name)
if cls is None:
raise TrainingError('Referenced class unregistered: {}'.format(registered_name))
return cls
class DatasetCreator(PersistableCreator):
@classmethod
def determine_filters(cls, name='', version=None, strict=True, **kwargs):
'''
stateless method to determine which filters to apply when looking for
existing persistable
Returns: database class, filter dictionary
:param registered_name: Class name registered in SimpleML
:param strict: whether to assume same class and name = same persistable,
or, load the data and compare the hash
'''
if version is not None:
filters = {
'name': name,
'version': version
}
else:
registered_name = kwargs.pop('registered_name')
# Check if dependency object was passed
pipeline = kwargs.pop('pipeline', None)
if pipeline is None:
# Use dependency reference to retrieve object
pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))
if strict:
# Build dummy object to retrieve hash to look for
new_dataset = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
new_dataset.add_pipeline(pipeline)
new_dataset.build_dataframe()
filters = {
'name': name,
'registered_name': registered_name,
'hash_': new_dataset._hash()
}
else:
# Assume combo of name, class, and pipeline will be unique
filters = {
'name': name,
'registered_name': registered_name,
'pipeline_id': pipeline.id if pipeline is not None else None
}
return Dataset, filters
@classmethod
def create(cls, registered_name, pipeline=None, **kwargs):
'''
Stateless method to create a new persistable with the desired parameters
kwargs are passed directly to persistable
:param registered_name: Class name registered in SimpleML
:param dataset_pipeline: dataset pipeline object
'''
if pipeline is None:
# Use dependency reference to retrieve object
pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))
new_dataset = cls.retrieve_from_registry(registered_name)(**kwargs)
new_dataset.add_pipeline(pipeline)
new_dataset.build_dataframe()
new_dataset.save()
return new_dataset
@classmethod
def retrieve_pipeline(cls, **pipeline_kwargs):
# Datasets do not require dataset pipelines so return None if it isn't passed
if not pipeline_kwargs:
LOGGER.warning('Dataset Pipeline parameters not passed, skipping dependencies. \
Only use this if dataset is already in the right format!')
return None
return cls.retrieve_dependency(PipelineCreator, **pipeline_kwargs)
class PipelineCreator(PersistableCreator):
@classmethod
def determine_filters(cls, name='', version=None, strict=False, **kwargs):
'''
stateless method to determine which filters to apply when looking for
existing persistable
Returns: database class, filter dictionary
:param registered_name: Class name registered in SimpleML
:param strict: whether to fit objects first before assuming they are identical
In theory if all inputs and classes are the same, the outputs should deterministically
be the same as well (up to random iter). So, you dont need to fit objects
to be sure they are the same
'''
if version is not None:
filters = {
'name': name,
'version': version
}
else:
# Check if dependency object was passed
dataset = kwargs.pop('dataset', None)
if dataset is None:
# Use dependency reference to retrieve object
dataset = cls.retrieve_dataset(**kwargs.pop('dataset_kwargs', {}))
# Build dummy object to retrieve hash to look for
registered_name = kwargs.pop('registered_name')
new_pipeline = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
new_pipeline.add_dataset(dataset)
if strict:
new_pipeline.fit()
filters = {
'name': name,
'registered_name': registered_name,
'hash_': new_pipeline._hash()
}
return Pipeline, filters
@classmethod
def create(cls, registered_name, dataset=None, **kwargs):
'''
Stateless method to create a new persistable with the desired parameters
kwargs are passed directly to persistable
:param registered_name: Class name registered in SimpleML
:param dataset: dataset object
'''
if dataset is None:
# Use dependency reference to retrieve object
dataset = cls.retrieve_dataset(**kwargs.pop('dataset_kwargs', {}))
new_pipeline = cls.retrieve_from_registry(registered_name)(**kwargs)
new_pipeline.add_dataset(dataset)
new_pipeline.fit()
new_pipeline.save()
return new_pipeline
@classmethod
def retrieve_dataset(cls, **dataset_kwargs):
return cls.retrieve_dependency(DatasetCreator, **dataset_kwargs)
class ModelCreator(PersistableCreator):
@classmethod
def determine_filters(cls, name='', version=None, strict=False, **kwargs):
'''
stateless method to determine which filters to apply when looking for
existing persistable
Returns: database class, filter dictionary
:param registered_name: Class name registered in SimpleML
:param strict: whether to fit objects first before assuming they are identical
In theory if all inputs and classes are the same, the outputs should deterministically
be the same as well (up to random iter). So, you dont need to fit objects
to be sure they are the same
'''
if version is not None:
filters = {
'name': name,
'version': version
}
else:
# Check if dependency object was passed
pipeline = kwargs.pop('pipeline', None)
if pipeline is None:
# Use dependency reference to retrieve object
pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))
# Build dummy object to retrieve hash to look for
registered_name = kwargs.pop('registered_name')
new_model = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
new_model.add_pipeline(pipeline)
if strict:
new_model.fit()
filters = {
'name': name,
'registered_name': registered_name,
'hash_': new_model._hash()
}
return Model, filters
@classmethod
def create(cls, registered_name, pipeline=None, **kwargs):
'''
Stateless method to create a new persistable with the desired parameters
kwargs are passed directly to persistable
:param registered_name: Class name registered in SimpleML
:param pipeline: pipeline object
'''
if pipeline is None:
# Use dependency reference to retrieve object
pipeline = cls.retrieve_pipeline(**kwargs.pop('pipeline_kwargs', {}))
new_model = cls.retrieve_from_registry(registered_name)(**kwargs)
new_model.add_pipeline(pipeline)
new_model.fit()
new_model.save()
return new_model
@classmethod
def retrieve_pipeline(cls, **pipeline_kwargs):
return cls.retrieve_dependency(PipelineCreator, **pipeline_kwargs)
class MetricCreator(PersistableCreator):
@classmethod
def determine_filters(cls, name=None, model_id=None, strict=False, **kwargs):
'''
stateless method to determine which filters to apply when looking for
existing persistable
Returns: database class, filter dictionary
:param registered_name: Class name registered in SimpleML
:param strict: whether to fit objects first before assuming they are identical
In theory if all inputs and classes are the same, the outputs should deterministically
be the same as well (up to random iter). So, you dont need to fit objects
to be sure they are the same
'''
# Check if dependency object was passed
model = kwargs.pop('model', None)
if name is not None and (model_id is not None or model is not None):
# Can't use default name because metrics are hard coded to reflect dataset split + class
filters = {
'name': name,
'model_id': model_id if model_id is not None else model.id,
}
else:
if model is None:
# Use dependency reference to retrieve object
model = cls.retrieve_model(**kwargs.pop('model_kwargs', {}))
# Build dummy object to retrieve hash to look for
registered_name = kwargs.pop('registered_name')
new_metric = cls.retrieve_from_registry(registered_name)(name=name, **kwargs)
new_metric.add_model(model)
if strict:
new_metric.score()
filters = {
'name': new_metric.name,
'registered_name': registered_name,
'hash_': new_metric._hash()
}
return Metric, filters
@classmethod
def create(cls, registered_name, model=None, **kwargs):
'''
Stateless method to create a new persistable with the desired parameters
kwargs are passed directly to persistable
:param registered_name: Class name registered in SimpleML
:param model: model class
'''
if model is None:
# Use dependency reference to retrieve object
model = cls.retrieve_model(**kwargs.pop('model_kwargs', {}))
new_metric = cls.retrieve_from_registry(registered_name)(**kwargs)
new_metric.add_model(model)
new_metric.score()
new_metric.save()
return new_metric
@classmethod
def retrieve_model(cls, **model_kwargs):
return cls.retrieve_dependency(ModelCreator, **model_kwargs)
| [
"logging.getLogger",
"simpleml.utils.errors.TrainingError",
"future.utils.with_metaclass",
"simpleml.persistables.meta_registry.SIMPLEML_REGISTRY.get"
] | [((479, 506), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (496, 506), False, 'import logging\n'), ((557, 588), 'future.utils.with_metaclass', 'with_metaclass', (['ABCMeta', 'object'], {}), '(ABCMeta, object)\n', (571, 588), False, 'from future.utils import with_metaclass\n'), ((3503, 3541), 'simpleml.persistables.meta_registry.SIMPLEML_REGISTRY.get', 'SIMPLEML_REGISTRY.get', (['registered_name'], {}), '(registered_name)\n', (3524, 3541), False, 'from simpleml.persistables.meta_registry import SIMPLEML_REGISTRY\n'), ((2077, 2124), 'simpleml.utils.errors.TrainingError', 'TrainingError', (['"""Expected dependency is missing"""'], {}), "('Expected dependency is missing')\n", (2090, 2124), False, 'from simpleml.utils.errors import TrainingError\n')] |
#!/usr/bin/env python
"""Generates a poller file that will be used as input to runsinglehap.py, hapsequencer.py, runmultihap.py or
hapmultisequencer.py based on the files or rootnames listed user-specified list file.
USAGE
>>> python drizzlepac/haputils/make_poller_files.py <input filename> -[ost]
- input filename: Name of a file containing a list of calibrated fits files (ending with "_flt.fits" or
"_flc.fits") or rootnames (9 characters, usually ending with a "q" to process. The corresponding
flc.fits or flt.fits files must exist in the user-specified path, the current working directory or the
online cache
- The '-o' optional input allows users to input the name of an output poller file that will be created.
If not explicitly specified, the poller file will be named "poller_file.out".
- The '-s' optional input allows users to input the Name of the skycell. The correct syntax for skycell
names is "skycell-pNNNNxXXyXX", where NNNN is the 4-digit projection cell number, and XX and YY are the
two-digit X and Y skycell indices, respectively. NOTE: this input argument is not needed for SVM poller
file creation, but *REQUIRED* for MVM poller file creation. Users can determine the skycell(s) that
their observations occupy using the ``haputils.which_skycell`` script.
- The '-t' optional input allows users to specify the type of poller file that will be created. The
valid input options are "svm" to create a poller file for use with the single-visit mosaics pipeline
or "mvm" to create a poller file for use with the multiple-visit mosaics pipeline. If not explicitly
specified, the default value is "svm". NOTE: if creating a MVM poller file, one must specify the
skycell name using the "-s" input argument.
Python USAGE:
>>> python
>>> from drizzlepac.haputils import make_poller_files
>>> make_poller_files.generate_poller_file(input_list, poller_file_type='svm', output_poller_filename="poller_file.out", skycell_name=None):
"""
import argparse
import os
import re
import sys
from astropy.io import fits
from drizzlepac.haputils import poller_utils
__taskname__ = 'make_poller_files'
def generate_poller_file(input_list, poller_file_type='svm', output_poller_filename="poller_file.out",
skycell_name=None):
"""Creates a properly formatted SVM or MVM poller file.
Parameters
----------
input_list : str
Name of the text file containing the list of filenames or rootnames to process
poller_file_type : str, optional
Type of poller file to create. 'svm' for single visit mosaic, 'mvm' for multi-visit mosaic. Default
value is 'svm'.
output_poller_filename : str, optional
Name of the output poller file that will be created. Default value is 'poller_file.out'.
skycell_name : str, optional
Name of the skycell to use when creating a MVM poller file. skycell_name is REQUIRED for the creation
of a MVM poller file, but completely unnecessary for the creation of a SVM poller file. The correct
syntax for skycell names is 'skycell-pNNNNxXXyXX', where NNNN is the 4-digit projection cell number,
and XX and YY are the two-digit X and Y skycell indices, respectively. Default value is logical
'None'. NOTE: this input argument is not needed for SVM poller file creation, but *REQUIRED* for MVM
poller file creation. Users can determine the skycell(s) that their observations occupy using the
``haputils.which_skycell`` script.
Returns
-------
Nothing.
"""
if poller_file_type == 'svm' and skycell_name:
print("PROTIP: Users only need to provide a skycell name for the creation of MVM poller files, not SVM poller files.")
# Open rootname list file
f = open(input_list, 'r')
rootname_list = f.readlines()
f.close()
output_list = []
for rootname in rootname_list:
rootname = rootname.strip()
fullfilepath = locate_fitsfile(rootname)
if len(fullfilepath) > 0:
if rootname.endswith(".fits"):
print("Found fits file {}".format(fullfilepath))
else:
print("Rootname {}: Found fits file {}".format(rootname, fullfilepath))
imgname = fullfilepath.split(os.sep)[-1]
else:
# Warn user if no fits file can be located for a given rootname, and skip processing of the file.
if rootname.endswith(".fits"):
item_type = "filename"
else:
item_type = "rootname"
print("WARNING: No fits file found for {} '{}'. This {} will be omitted from the poller file.".format(item_type, rootname, item_type))
continue
# Build each individual poller file line
linelist = []
linelist.append(imgname)
imghdu = fits.open(fullfilepath)
imghdr = imghdu[0].header
linelist.append("{}".format(imghdr['proposid']))
linelist.append(imgname.split("_")[-2][1:4].upper())
linelist.append(imghdr['linenum'].split(".")[0])
linelist.append("{}".format(imghdr['exptime']))
if imghdr['INSTRUME'].lower() == "acs":
filter = poller_utils.determine_filter_name("{};{}".format(imghdr['FILTER1'], imghdr['FILTER2']))
elif imghdr['INSTRUME'].lower() == "wfc3":
filter = poller_utils.determine_filter_name(imghdr['FILTER'])
linelist.append(filter.upper())
linelist.append(imghdr['detector'].upper())
if poller_file_type == 'mvm': # Additional stuff to add to MVM poller files
if skycell_name:
pattern = re.compile("(skycell-p\d{4}x\d{2}y\d{2})")
skycell_name_format_check = pattern.match(skycell_name)
if skycell_name_format_check:
linelist.append("{}".format(skycell_name))
else:
raise ValueError("'{}' is an improperly formatted skycell name. Please refer to documentation for information regarding correct skycell name syntax.".format(skycell_name))
else:
raise Exception("No skycell name was provided. The name of the skycell that the observations occupy is required for MVM poller file creation.")
linelist.append("NEW")
linelist.append(fullfilepath)
imghdu.close()
# Append newly created poller file line to the list of lines to be written to the output file.
output_list.append(",".join(linelist))
# adding carriage returns to all but the very last line in the output file.
list_size = len(output_list)
for ctr in range(0, list_size):
if ctr != list_size-1:
trailing_char = "\n"
else:
trailing_char = ""
output_list[ctr] = output_list[ctr]+trailing_char
# write output poller file
with open(output_poller_filename, 'w') as f:
f.writelines(output_list)
print("wrote {} poller file '{}'.".format(poller_file_type.upper(), output_poller_filename))
# ============================================================================================================
def locate_fitsfile(search_string):
"""returns full file name (fullpath + filename) for a specified rootname or filename. The search
algorithm looks for the file in the following order:
- Search for a _flc.fits file in the current working directory
- Search for a _flt.fits file in the current working directory
- Search for a _flc.fits file in subdirectory in the path specified in $DATA_PATH
- Search for a _flt.fits file in subdirectory in the path specified in $DATA_PATH
Parameters
----------
search_string : str
rootname or filename to locate
Returns
-------
fullfilepath : str
full file path + image name of specified search_string.
"""
if search_string.endswith("_flt.fits") or search_string.endswith("_flc.fits"): # Process search_string as a full filename
# Look in user-provided path (assuming they provided one)
if os.path.exists(search_string) and os.sep in search_string:
return search_string
# Look for files in CWD
if os.path.exists(search_string) and os.sep not in search_string:
return os.getcwd() + os.sep + search_string
# If not found in CWD, look elsewhere...
if not os.getenv("DATA_PATH"):
sys.exit("ERROR: Undefined online cache data root path. Please set environment variable 'DATA_PATH'")
fullfilepath = "{}{}{}{}{}{}{}".format(os.getenv("DATA_PATH"), os.sep, search_string[:4],
os.sep, search_string[:-9], os.sep, search_string)
if os.path.exists(search_string):
return fullfilepath
else:
return "" # Return a null string if no file is found
else: # Process search_string as a rootname
# Look for files in CWD first
for fits_ext in ["flc", "flt"]:
if os.path.exists("{}_{}.fits".format(search_string, fits_ext)):
return "{}{}{}_{}.fits".format(os.getcwd(), os.sep, search_string, fits_ext)
# If not found in CWD, look elsewhere...
if not os.getenv("DATA_PATH"):
sys.exit("ERROR: Undefined online cache data root path. Please set environment variable 'DATA_PATH'")
filenamestub = "{}{}{}{}{}{}{}".format(os.getenv("DATA_PATH"), os.sep, search_string[:4],
os.sep, search_string, os.sep, search_string)
for fits_ext in ["flc", "flt"]:
if os.path.exists("{}_{}.fits".format(filenamestub, fits_ext)):
return "{}_{}.fits".format(filenamestub, fits_ext)
# it should never get here unless no file was found either locally or elsewhere in $DATA_PATH.
return "" # Return a null string if no file is found
# ============================================================================================================
if __name__ == '__main__':
# Parse input arguments
parser = argparse.ArgumentParser(description='Create a HAP SVM or MVM poller file')
parser.add_argument('input_list',
help='Name of a file containing a list of calibrated fits files (ending with '
'"_flt.fits" or "_flc.fits") or rootnames (9 characters, usually ending '
'with a "q" to process. The corresponding flc.fits or flt.fits files must '
'exist in the user-specified path, the current working directory or the online '
'cache')
parser.add_argument('-o', '--output_poller_filename', required=False, default="poller_file.out",
help='Name of an output poller file that will be created. If not explicitly '
'specified, the poller file will be named "poller_file.out".')
parser.add_argument('-s', '--skycell_name', required=False, default="None",
help='Name of the skycell. The correct syntax for skycell names is '
'"skycell-pNNNNxXXyXX", where NNNN is the 4-digit projection cell number, and '
'XX and YY are the two-digit X and Y skycell indices, respectively. NOTE: this '
'input argument is not needed for SVM poller file creation, but *REQUIRED* for '
'MVM poller file creation. Users can determine the skycell(s) that their '
'observations occupy using the haputils.which_skycell.py script.')
parser.add_argument('-t', '--poller_file_type', required=False, choices=['svm', 'mvm'], default='svm',
help='Type of poller file to be created. "svm" to create a poller file for use with '
'the single-visit mosaics pipeline and "mvm" to create a poller file for use '
'with the multiple-visit mosaics pipeline. If not explicitly '
'specified, the default value is "svm". NOTE: if creating a MVM poller file, '
'one must specify the skycell name using the "-s" input argument.')
in_args = parser.parse_args()
# reformat input args
if in_args.skycell_name == 'None':
in_args.skycell_name = None
# logic to make sure user has specified the skycell name if a MVM poller file is to be created.
if in_args.poller_file_type == "mvm" and in_args.skycell_name is None:
parser.error("ERROR: To create a MVM poller file, a skycell name must be specified with the '-s' argument.")
generate_poller_file(in_args.input_list,
poller_file_type=in_args.poller_file_type,
output_poller_filename=in_args.output_poller_filename,
skycell_name=in_args.skycell_name)
| [
"os.path.exists",
"os.getenv",
"argparse.ArgumentParser",
"re.compile",
"os.getcwd",
"drizzlepac.haputils.poller_utils.determine_filter_name",
"sys.exit",
"astropy.io.fits.open"
] | [((10193, 10267), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a HAP SVM or MVM poller file"""'}), "(description='Create a HAP SVM or MVM poller file')\n", (10216, 10267), False, 'import argparse\n'), ((4926, 4949), 'astropy.io.fits.open', 'fits.open', (['fullfilepath'], {}), '(fullfilepath)\n', (4935, 4949), False, 'from astropy.io import fits\n'), ((8817, 8846), 'os.path.exists', 'os.path.exists', (['search_string'], {}), '(search_string)\n', (8831, 8846), False, 'import os\n'), ((8154, 8183), 'os.path.exists', 'os.path.exists', (['search_string'], {}), '(search_string)\n', (8168, 8183), False, 'import os\n'), ((8289, 8318), 'os.path.exists', 'os.path.exists', (['search_string'], {}), '(search_string)\n', (8303, 8318), False, 'import os\n'), ((8472, 8494), 'os.getenv', 'os.getenv', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (8481, 8494), False, 'import os\n'), ((8508, 8619), 'sys.exit', 'sys.exit', (['"""ERROR: Undefined online cache data root path. Please set environment variable \'DATA_PATH\'"""'], {}), '(\n "ERROR: Undefined online cache data root path. Please set environment variable \'DATA_PATH\'"\n )\n', (8516, 8619), False, 'import sys\n'), ((8657, 8679), 'os.getenv', 'os.getenv', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (8666, 8679), False, 'import os\n'), ((9334, 9356), 'os.getenv', 'os.getenv', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (9343, 9356), False, 'import os\n'), ((9370, 9481), 'sys.exit', 'sys.exit', (['"""ERROR: Undefined online cache data root path. Please set environment variable \'DATA_PATH\'"""'], {}), '(\n "ERROR: Undefined online cache data root path. Please set environment variable \'DATA_PATH\'"\n )\n', (9378, 9481), False, 'import sys\n'), ((9519, 9541), 'os.getenv', 'os.getenv', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (9528, 9541), False, 'import os\n'), ((5445, 5497), 'drizzlepac.haputils.poller_utils.determine_filter_name', 'poller_utils.determine_filter_name', (["imghdr['FILTER']"], {}), "(imghdr['FILTER'])\n", (5479, 5497), False, 'from drizzlepac.haputils import poller_utils\n'), ((5730, 5775), 're.compile', 're.compile', (['"""(skycell-p\\\\d{4}x\\\\d{2}y\\\\d{2})"""'], {}), "('(skycell-p\\\\d{4}x\\\\d{2}y\\\\d{2})')\n", (5740, 5775), False, 'import re\n'), ((8371, 8382), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8380, 8382), False, 'import os\n'), ((9224, 9235), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9233, 9235), False, 'import os\n')] |
import unittest
from ...scenes import Scene
class TestSceneMethods(unittest.TestCase):
def test_scene(self):
scene = Scene()
if __name__ == "__main__":
unittest.main()
| [
"unittest.main"
] | [((173, 188), 'unittest.main', 'unittest.main', ([], {}), '()\n', (186, 188), False, 'import unittest\n')] |
#!/usr/bin/evn python3
# coding=utf-8
import logging
import redis
from typing import Any
from conf import dev_conf as conf
from util import singleton
@singleton
class Config:
"""
根据指定的配置文件,把conf文件转换成字典
默认情况下使用 conf 中的配置
"""
def __init__(self):
self.config = conf
self.redis_db = None
def get_dict(self, exclude: str = "__") -> dict:
"""
把配置文件的内容转换成字典,默认情况下会忽略配置文件中以 '__xxx' 开头的配置项
:param exclude:
:return: dict
"""
config_data = {}
for key in dir(self.config):
if not str(key).startswith(exclude):
config_data[key] = getattr(self.config, key)
return config_data
def get_conf(self, key_name: str) -> Any:
"""
获取单个配置值
:param key_name:
:return:
"""
all_config = self.get_dict()
return all_config[key_name]
def get_db_url(self, db_role="master", db_name="default") -> str:
"""
# 获取MySQL 连接字符串
:param db_role:
:param db_name:
:return:
"""
try:
db_config = self.get_conf("mysql").get(db_role).get(db_name)
except AttributeError as error_info:
logging.error(f"Config: {error_info} Key!")
else:
tmp = "mysql+pymysql://{username}:{password}@{hostname}:" + \
"{port}/{database}?charset={charset}"
db_url = tmp.format(**db_config)
return db_url
def get_redis(self, db_role="master", db_name="default") -> redis.Redis:
"""
# 获取redis 配置
:param db_role:
:param db_name:
:return:
"""
if self.redis_db:
return self.redis_db
try:
db_conf = self.get_conf("redis").get(db_role).get(db_name)
pool = redis.ConnectionPool(
host=db_conf["host"],
port=db_conf["port"],
db=db_conf["db"]
)
self.redis_db = redis.Redis(connection_pool=pool)
except AttributeError as db_exp:
logging.error(f"get_redis_error:{db_exp}")
else:
return self.redis_db
class ConfigMixIn:
"""
方便其它类混入使用
"""
conf = Config()
if __name__ == '__main__':
a = Config()
b = Config()
print(id(a))
print(id(b))
c = a.get_redis()
d = b.get_redis()
print(id(c))
print(id(d))
c.set("ka", "va")
| [
"redis.ConnectionPool",
"logging.error",
"redis.Redis"
] | [((1837, 1924), 'redis.ConnectionPool', 'redis.ConnectionPool', ([], {'host': "db_conf['host']", 'port': "db_conf['port']", 'db': "db_conf['db']"}), "(host=db_conf['host'], port=db_conf['port'], db=db_conf\n ['db'])\n", (1857, 1924), False, 'import redis\n'), ((2010, 2043), 'redis.Redis', 'redis.Redis', ([], {'connection_pool': 'pool'}), '(connection_pool=pool)\n', (2021, 2043), False, 'import redis\n'), ((1228, 1271), 'logging.error', 'logging.error', (['f"""Config: {error_info} Key!"""'], {}), "(f'Config: {error_info} Key!')\n", (1241, 1271), False, 'import logging\n'), ((2097, 2139), 'logging.error', 'logging.error', (['f"""get_redis_error:{db_exp}"""'], {}), "(f'get_redis_error:{db_exp}')\n", (2110, 2139), False, 'import logging\n')] |
import json
import os
script_path = os.path.abspath(__file__)
script_dir = os.path.split(script_path)[0]
def get_config():
rel_path = 'resources/config.json'
path = os.path.join(script_dir, rel_path)
with open(path, 'r') as f:
config = json.loads(f.read())
return config
def get_submissions():
rel_path = 'resources/submissions.json'
path = os.path.join(script_dir, rel_path)
with open(path, 'r') as f:
submissions = json.loads(f.read())
return submissions
def get_subreddits():
rel_path = 'resources/subreddits.json'
path = os.path.join(script_dir, rel_path)
with open(path, 'r') as f:
subreddits = json.loads(f.read())
return subreddits
def update_config(config):
rel_path = 'resources/config.json'
path = os.path.join(script_dir, rel_path)
with open(path, 'w') as f:
f.write(json.dumps(config, indent=2))
def update_submissions(submissions):
rel_path = 'resources/submissions.json'
path = os.path.join(script_dir, rel_path)
with open(path, 'w') as f:
f.write(json.dumps(submissions, indent=2, default=str))
def update_subreddits(subreddits):
rel_path = 'resources/subreddits.json'
path = os.path.join(script_dir, rel_path)
with open(path, 'w') as f:
f.write(json.dumps(subreddits, indent=2)) | [
"os.path.abspath",
"json.dumps",
"os.path.join",
"os.path.split"
] | [((38, 63), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (53, 63), False, 'import os\n'), ((77, 103), 'os.path.split', 'os.path.split', (['script_path'], {}), '(script_path)\n', (90, 103), False, 'import os\n'), ((177, 211), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (189, 211), False, 'import os\n'), ((379, 413), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (391, 413), False, 'import os\n'), ((589, 623), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (601, 623), False, 'import os\n'), ((798, 832), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (810, 832), False, 'import os\n'), ((1004, 1038), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (1016, 1038), False, 'import os\n'), ((1225, 1259), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (1237, 1259), False, 'import os\n'), ((880, 908), 'json.dumps', 'json.dumps', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (890, 908), False, 'import json\n'), ((1086, 1132), 'json.dumps', 'json.dumps', (['submissions'], {'indent': '(2)', 'default': 'str'}), '(submissions, indent=2, default=str)\n', (1096, 1132), False, 'import json\n'), ((1307, 1339), 'json.dumps', 'json.dumps', (['subreddits'], {'indent': '(2)'}), '(subreddits, indent=2)\n', (1317, 1339), False, 'import json\n')] |
from flask import g
from flask_socketio import SocketIO, emit
from logic.game_manager import GameManager
from logic.player_manager import PlayerManager
from logic.player_logic import PlayerLogic
from globals import socketio, db
from session import SessionHelper, SessionKeys
from utils.response import Response
from utils.socketio_helper import commit_and_notify_if_dirty
@socketio.on('disconnect')
def on_disconnect():
change_state(False)
@socketio.on('connect')
def on_connect():
change_state(True)
def change_state(is_online: bool):
if SessionHelper.has(SessionKeys.PLAYER_ID):
pm = PlayerManager(db)
player = pm.get_my_player()
if player is None:
return
try:
gm = GameManager(db)
g.game = gm.get_my_game(optional=True)
g.game.set_dirty()
player.set_online(is_online)
change_admin(is_online, player)
commit_and_notify_if_dirty()
db.session.remove()
except Exception as e:
# emit('waitroom', Response.Error("Не удалось сменить статус").as_dicts())
db.session.rollback()
db.session.remove()
raise
def change_admin(is_online: bool, player: PlayerLogic):
if not is_online and player.model.isAdmin:
gm = GameManager(db)
new_adm = next((p.model for p in gm.get_my_game().get_players(True) if (not p.model.isAdmin and p.model.isOnline)), None)
if new_adm is not None:
new_adm.isAdmin = True
player.model.isAdmin = False
db.session.commit()
gm.get_my_game().notify()
| [
"globals.db.session.commit",
"utils.socketio_helper.commit_and_notify_if_dirty",
"globals.socketio.on",
"logic.player_manager.PlayerManager",
"globals.db.session.rollback",
"globals.db.session.remove",
"flask.g.game.set_dirty",
"logic.game_manager.GameManager",
"session.SessionHelper.has"
] | [((376, 401), 'globals.socketio.on', 'socketio.on', (['"""disconnect"""'], {}), "('disconnect')\n", (387, 401), False, 'from globals import socketio, db\n'), ((450, 472), 'globals.socketio.on', 'socketio.on', (['"""connect"""'], {}), "('connect')\n", (461, 472), False, 'from globals import socketio, db\n'), ((558, 598), 'session.SessionHelper.has', 'SessionHelper.has', (['SessionKeys.PLAYER_ID'], {}), '(SessionKeys.PLAYER_ID)\n', (575, 598), False, 'from session import SessionHelper, SessionKeys\n'), ((613, 630), 'logic.player_manager.PlayerManager', 'PlayerManager', (['db'], {}), '(db)\n', (626, 630), False, 'from logic.player_manager import PlayerManager\n'), ((1319, 1334), 'logic.game_manager.GameManager', 'GameManager', (['db'], {}), '(db)\n', (1330, 1334), False, 'from logic.game_manager import GameManager\n'), ((743, 758), 'logic.game_manager.GameManager', 'GameManager', (['db'], {}), '(db)\n', (754, 758), False, 'from logic.game_manager import GameManager\n'), ((822, 840), 'flask.g.game.set_dirty', 'g.game.set_dirty', ([], {}), '()\n', (838, 840), False, 'from flask import g\n'), ((938, 966), 'utils.socketio_helper.commit_and_notify_if_dirty', 'commit_and_notify_if_dirty', ([], {}), '()\n', (964, 966), False, 'from utils.socketio_helper import commit_and_notify_if_dirty\n'), ((979, 998), 'globals.db.session.remove', 'db.session.remove', ([], {}), '()\n', (996, 998), False, 'from globals import socketio, db\n'), ((1585, 1604), 'globals.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1602, 1604), False, 'from globals import socketio, db\n'), ((1129, 1150), 'globals.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (1148, 1150), False, 'from globals import socketio, db\n'), ((1163, 1182), 'globals.db.session.remove', 'db.session.remove', ([], {}), '()\n', (1180, 1182), False, 'from globals import socketio, db\n')] |
from django.contrib import admin
from .models import Rating
# Register your models here.
@admin.register(Rating)
class RatingAdmin(admin.ModelAdmin):
date_hierarchy = 'created_on'
search_fields = ['user_id__username', 'value']
list_display = ('user_id', 'value',)
list_filter = ('user_id', 'value', 'is_deleted')
| [
"django.contrib.admin.register"
] | [((92, 114), 'django.contrib.admin.register', 'admin.register', (['Rating'], {}), '(Rating)\n', (106, 114), False, 'from django.contrib import admin\n')] |
import discpy
from discpy import commands
bot = commands.Bot(command_prefix='!')
# you can set "arg" keyword argument to the name of argument that represents the option in the command function
# and then change the option name as desired.
@bot.slash_command()
@discpy.application.option('sentence', arg='text', description='The text to say!')
async def say(ctx, text):
await ctx.respond(f'{ctx.author.name} said: {text}')
# in above command, the option name in discord will appear "sentence" but in this function, it will
# be passed to text argument
bot.run('token')
| [
"discpy.application.option",
"discpy.commands.Bot"
] | [((49, 81), 'discpy.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""!"""'}), "(command_prefix='!')\n", (61, 81), False, 'from discpy import commands\n'), ((263, 349), 'discpy.application.option', 'discpy.application.option', (['"""sentence"""'], {'arg': '"""text"""', 'description': '"""The text to say!"""'}), "('sentence', arg='text', description=\n 'The text to say!')\n", (288, 349), False, 'import discpy\n')] |
from huaweisms.api.common import get_from_url, ApiCtx
from .config import API_URL
def status(ctx: ApiCtx):
url = "{}/monitoring/status".format(API_URL)
return get_from_url(url, ctx)
| [
"huaweisms.api.common.get_from_url"
] | [((169, 191), 'huaweisms.api.common.get_from_url', 'get_from_url', (['url', 'ctx'], {}), '(url, ctx)\n', (181, 191), False, 'from huaweisms.api.common import get_from_url, ApiCtx\n')] |
"""add role
Revision ID: 221ccee39de7
Revises: <KEY>
Create Date: 2021-05-13 23:51:53.241485
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "221ccee39de7"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"role",
sa.Column("id", postgresql.UUID(), nullable=False),
sa.Column("title", sa.String(), nullable=False),
sa.Column("grade_id", postgresql.UUID(), nullable=False),
sa.Column("practice_id", postgresql.UUID(), nullable=False),
sa.Column("organisation_id", postgresql.UUID(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
sa.ForeignKeyConstraint(["grade_id"], ["grade.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["organisation_id"], ["organisation.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["practice_id"], ["practice.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_role_created_at"), "role", ["created_at"], unique=False)
op.create_index(op.f("ix_role_title"), "role", ["title"], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_role_title"), table_name="role")
op.drop_index(op.f("ix_role_created_at"), table_name="role")
op.drop_table("role")
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"alembic.op.f",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.dialects.postgresql.UUID",
"sqlalchemy.String"
] | [((1637, 1658), 'alembic.op.drop_table', 'op.drop_table', (['"""role"""'], {}), "('role')\n", (1650, 1658), False, 'from alembic import op\n'), ((926, 997), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['grade_id']", "['grade.id']"], {'ondelete': '"""CASCADE"""'}), "(['grade_id'], ['grade.id'], ondelete='CASCADE')\n", (949, 997), True, 'import sqlalchemy as sa\n'), ((1007, 1097), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['organisation_id']", "['organisation.id']"], {'ondelete': '"""CASCADE"""'}), "(['organisation_id'], ['organisation.id'], ondelete=\n 'CASCADE')\n", (1030, 1097), True, 'import sqlalchemy as sa\n'), ((1102, 1179), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['practice_id']", "['practice.id']"], {'ondelete': '"""CASCADE"""'}), "(['practice_id'], ['practice.id'], ondelete='CASCADE')\n", (1125, 1179), True, 'import sqlalchemy as sa\n'), ((1189, 1218), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1212, 1218), True, 'import sqlalchemy as sa\n'), ((1246, 1272), 'alembic.op.f', 'op.f', (['"""ix_role_created_at"""'], {}), "('ix_role_created_at')\n", (1250, 1272), False, 'from alembic import op\n'), ((1332, 1353), 'alembic.op.f', 'op.f', (['"""ix_role_title"""'], {}), "('ix_role_title')\n", (1336, 1353), False, 'from alembic import op\n'), ((1526, 1547), 'alembic.op.f', 'op.f', (['"""ix_role_title"""'], {}), "('ix_role_title')\n", (1530, 1547), False, 'from alembic import op\n'), ((1586, 1612), 'alembic.op.f', 'op.f', (['"""ix_role_created_at"""'], {}), "('ix_role_created_at')\n", (1590, 1612), False, 'from alembic import op\n'), ((464, 481), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {}), '()\n', (479, 481), False, 'from sqlalchemy.dialects import postgresql\n'), ((527, 538), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (536, 538), True, 'import sqlalchemy as sa\n'), ((587, 604), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {}), '()\n', (602, 604), False, 'from sqlalchemy.dialects import postgresql\n'), ((656, 673), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {}), '()\n', (671, 673), False, 'from sqlalchemy.dialects import postgresql\n'), ((729, 746), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {}), '()\n', (744, 746), False, 'from sqlalchemy.dialects import postgresql\n'), ((797, 823), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (808, 823), True, 'import sqlalchemy as sa\n'), ((874, 900), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (885, 900), True, 'import sqlalchemy as sa\n')] |
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 <NAME>, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import time
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
from urllib.parse import quote
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def get_host_port_path(self, url):
if url[:7] == 'http://':
url = url[7:]
elif url[:8] == 'https://':
url = url[8:]
host_port = url.split('/')[0].split(':')
host = host_port[0]
if len(host_port) > 1:
port = int(host_port[1])
else:
port = 80
path = '/' + '/'.join(url.split('/')[1:])
return host, port, path
def get_code(self, data):
return int(data.split()[1])
def get_headers(self,data):
return data.split("\r\n\r\n")[0]
def get_body(self, data):
return data.split("\r\n\r\n")[1]
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('ISO-8859-1')
def GET(self, url, args=None):
host, port, path = self.get_host_port_path(url)
data_send = "GET " + path + " HTTP/1.1\r\n" + "Host: " + host + "\r\nAccept: */*\r\nConnection: close\r\n\r\n"
self.connect(host, port)
self.sendall(data_send)
data_recv = self.recvall(self.socket)
code = self.get_code(data_recv)
body = self.get_body(data_recv)
self.close()
return HTTPResponse(code, body)
def POST(self, url, args=None):
host, port, path = self.get_host_port_path(url)
data_send = "POST " + path + " HTTP/1.1\r\n" + "Host: " + host + "\r\nAccept: */*\r\nConnection: close\r\nUser-Agent: Assignment/2\r\n"
if args is not None:
arg_string = ""
for i, (key, value) in enumerate(args.items()):
arg_string += quote(key) + '=' + quote(value)
if i != len(args)-1:
arg_string += '&'
content_length = len(arg_string)
data_send += "Content-Length: " + str(content_length) + "\r\n"
data_send += "Content-Type: application/x-www-form-urlencoded\r\n\r\n"
data_send += arg_string
else:
data_send += "Content-Length: 0"
data_send += '\r\n\r\n'
print(data_send)
self.connect(host, port)
self.sendall(data_send)
data_recv = self.recvall(self.socket)
code = self.get_code(data_recv)
body = self.get_body(data_recv)
self.close()
return HTTPResponse(code, body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
response = client.command( sys.argv[2], sys.argv[1] )
print("Code:", response.code)
print("Body:")
print(response.body)
elif (len(sys.argv) == 4):
# Split data string into dict
args_split = sys.argv[3].split('&')
args = {}
for arg in args_split:
key, value = arg.split('=')
args[key] = value
response = client.command( sys.argv[2], sys.argv[1], args )
print("Code:", response.code)
print("Body:")
print(response.body)
else:
response = client.command( sys.argv[1] )
| [
"urllib.parse.quote",
"socket.socket",
"sys.exit"
] | [((1257, 1306), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1270, 1306), False, 'import socket\n'), ((4432, 4443), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4440, 4443), False, 'import sys\n'), ((3413, 3425), 'urllib.parse.quote', 'quote', (['value'], {}), '(value)\n', (3418, 3425), False, 'from urllib.parse import quote\n'), ((3394, 3404), 'urllib.parse.quote', 'quote', (['key'], {}), '(key)\n', (3399, 3404), False, 'from urllib.parse import quote\n')] |
"""Wrappers for the ophyd devices."""
from ophyd import Device, Signal
from ophyd import Kind
from ophyd.device import Component as Cpt
class CalibrationData(Device):
"""A device to hold pyFAI calibration data."""
dist = Cpt(Signal, value=1., kind=Kind.config)
poni1 = Cpt(Signal, value=0., kind=Kind.config)
poni2 = Cpt(Signal, value=0., kind=Kind.config)
rot1 = Cpt(Signal, value=0., kind=Kind.config)
rot2 = Cpt(Signal, value=0., kind=Kind.config)
rot3 = Cpt(Signal, value=0., kind=Kind.config)
pixel1 = Cpt(Signal, value=0., kind=Kind.config)
pixel2 = Cpt(Signal, value=0., kind=Kind.config)
detector = Cpt(Signal, value="", kind=Kind.config)
wavelength = Cpt(Signal, value=0., kind=Kind.config)
| [
"ophyd.device.Component"
] | [((231, 271), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(1.0)', 'kind': 'Kind.config'}), '(Signal, value=1.0, kind=Kind.config)\n', (234, 271), True, 'from ophyd.device import Component as Cpt\n'), ((283, 323), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (286, 323), True, 'from ophyd.device import Component as Cpt\n'), ((335, 375), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (338, 375), True, 'from ophyd.device import Component as Cpt\n'), ((386, 426), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (389, 426), True, 'from ophyd.device import Component as Cpt\n'), ((437, 477), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (440, 477), True, 'from ophyd.device import Component as Cpt\n'), ((488, 528), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (491, 528), True, 'from ophyd.device import Component as Cpt\n'), ((541, 581), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (544, 581), True, 'from ophyd.device import Component as Cpt\n'), ((594, 634), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (597, 634), True, 'from ophyd.device import Component as Cpt\n'), ((649, 688), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '""""""', 'kind': 'Kind.config'}), "(Signal, value='', kind=Kind.config)\n", (652, 688), True, 'from ophyd.device import Component as Cpt\n'), ((706, 746), 'ophyd.device.Component', 'Cpt', (['Signal'], {'value': '(0.0)', 'kind': 'Kind.config'}), '(Signal, value=0.0, kind=Kind.config)\n', (709, 746), True, 'from ophyd.device import Component as Cpt\n')] |
import numpy as np
import pytest
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from hcrystalball.metrics import get_scorer
from hcrystalball.model_selection import FinerTimeSplit
from hcrystalball.model_selection import get_best_not_failing_model
from hcrystalball.model_selection import select_model
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
@pytest.mark.parametrize(
"train_data, grid_search, parallel_over_dict",
[("two_regions", "", {"Region": "region_0"}), ("two_regions", "", None)],
indirect=["train_data", "grid_search"],
)
def test_select_model(train_data, grid_search, parallel_over_dict):
_train_data = train_data
if parallel_over_dict:
col, value = list(parallel_over_dict.items())[0]
_train_data = train_data[train_data[col] == value].drop(columns="Region")
partition_columns = ["Region", "Product"]
results = select_model(
_train_data,
target_col_name="Quantity",
partition_columns=partition_columns,
parallel_over_dict=parallel_over_dict,
grid_search=grid_search,
country_code_column="Holidays_code",
)
if parallel_over_dict:
partitions = (
train_data.loc[train_data[col] == value, partition_columns]
.drop_duplicates()
.to_dict(orient="records")
)
else:
partitions = train_data[partition_columns].drop_duplicates().to_dict(orient="records")
assert len(results) == len(partitions)
for result in results:
assert result.best_model_name == "good_dummy"
assert result.partition in partitions
@pytest.mark.parametrize(
"X_y_optional, negative_data, best_model_name, rank, expected_error",
[
("", False, "ExponentialSmoothingWrapper", 1, None),
("", True, "SklearnWrapper", 2, None),
("", True, "", 2, ValueError),
],
indirect=["X_y_optional"],
)
def test_get_best_not_failing_model(X_y_optional, negative_data, best_model_name, rank, expected_error):
X, y = X_y_optional
# data contains 0
y[y < 1] = 1
if negative_data:
y[-1] = -1
models = [
ExponentialSmoothingWrapper(freq="D", seasonal="mul"),
get_sklearn_wrapper(DummyRegressor, strategy="constant", constant=-1000),
]
models = models if expected_error is None else models[:1]
grid_search = GridSearchCV(
estimator=Pipeline([("model", "passthrough")]),
param_grid=[{"model": models}],
scoring=get_scorer("neg_mean_absolute_error"),
cv=FinerTimeSplit(n_splits=1, horizon=5),
refit=False,
error_score=np.nan,
)
grid_search.fit(X, y)
if expected_error:
with pytest.raises(expected_error):
get_best_not_failing_model(grid_search, X, y)
else:
best_param_rank = get_best_not_failing_model(grid_search, X, y)
assert isinstance(best_param_rank, dict)
assert best_param_rank["params"]["model"].__class__.__name__ == best_model_name
assert best_param_rank["rank"] == rank
| [
"hcrystalball.model_selection.get_best_not_failing_model",
"pytest.mark.parametrize",
"hcrystalball.metrics.get_scorer",
"hcrystalball.wrappers.get_sklearn_wrapper",
"pytest.raises",
"sklearn.pipeline.Pipeline",
"hcrystalball.model_selection.FinerTimeSplit",
"hcrystalball.model_selection.select_model"... | [((503, 696), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""train_data, grid_search, parallel_over_dict"""', "[('two_regions', '', {'Region': 'region_0'}), ('two_regions', '', None)]"], {'indirect': "['train_data', 'grid_search']"}), "('train_data, grid_search, parallel_over_dict', [(\n 'two_regions', '', {'Region': 'region_0'}), ('two_regions', '', None)],\n indirect=['train_data', 'grid_search'])\n", (526, 696), False, 'import pytest\n'), ((1759, 2023), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""X_y_optional, negative_data, best_model_name, rank, expected_error"""', "[('', False, 'ExponentialSmoothingWrapper', 1, None), ('', True,\n 'SklearnWrapper', 2, None), ('', True, '', 2, ValueError)]"], {'indirect': "['X_y_optional']"}), "(\n 'X_y_optional, negative_data, best_model_name, rank, expected_error', [\n ('', False, 'ExponentialSmoothingWrapper', 1, None), ('', True,\n 'SklearnWrapper', 2, None), ('', True, '', 2, ValueError)], indirect=[\n 'X_y_optional'])\n", (1782, 2023), False, 'import pytest\n'), ((1030, 1231), 'hcrystalball.model_selection.select_model', 'select_model', (['_train_data'], {'target_col_name': '"""Quantity"""', 'partition_columns': 'partition_columns', 'parallel_over_dict': 'parallel_over_dict', 'grid_search': 'grid_search', 'country_code_column': '"""Holidays_code"""'}), "(_train_data, target_col_name='Quantity', partition_columns=\n partition_columns, parallel_over_dict=parallel_over_dict, grid_search=\n grid_search, country_code_column='Holidays_code')\n", (1042, 1231), False, 'from hcrystalball.model_selection import select_model\n'), ((2283, 2336), 'hcrystalball.wrappers.ExponentialSmoothingWrapper', 'ExponentialSmoothingWrapper', ([], {'freq': '"""D"""', 'seasonal': '"""mul"""'}), "(freq='D', seasonal='mul')\n", (2310, 2336), False, 'from hcrystalball.wrappers import ExponentialSmoothingWrapper\n'), ((2346, 2418), 'hcrystalball.wrappers.get_sklearn_wrapper', 'get_sklearn_wrapper', (['DummyRegressor'], {'strategy': '"""constant"""', 'constant': '(-1000)'}), "(DummyRegressor, strategy='constant', constant=-1000)\n", (2365, 2418), False, 'from hcrystalball.wrappers import get_sklearn_wrapper\n'), ((2965, 3010), 'hcrystalball.model_selection.get_best_not_failing_model', 'get_best_not_failing_model', (['grid_search', 'X', 'y'], {}), '(grid_search, X, y)\n', (2991, 3010), False, 'from hcrystalball.model_selection import get_best_not_failing_model\n'), ((2538, 2574), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('model', 'passthrough')]"], {}), "([('model', 'passthrough')])\n", (2546, 2574), False, 'from sklearn.pipeline import Pipeline\n'), ((2632, 2669), 'hcrystalball.metrics.get_scorer', 'get_scorer', (['"""neg_mean_absolute_error"""'], {}), "('neg_mean_absolute_error')\n", (2642, 2669), False, 'from hcrystalball.metrics import get_scorer\n'), ((2682, 2719), 'hcrystalball.model_selection.FinerTimeSplit', 'FinerTimeSplit', ([], {'n_splits': '(1)', 'horizon': '(5)'}), '(n_splits=1, horizon=5)\n', (2696, 2719), False, 'from hcrystalball.model_selection import FinerTimeSplit\n'), ((2840, 2869), 'pytest.raises', 'pytest.raises', (['expected_error'], {}), '(expected_error)\n', (2853, 2869), False, 'import pytest\n'), ((2883, 2928), 'hcrystalball.model_selection.get_best_not_failing_model', 'get_best_not_failing_model', (['grid_search', 'X', 'y'], {}), '(grid_search, X, y)\n', (2909, 2928), False, 'from hcrystalball.model_selection import get_best_not_failing_model\n')] |
from squidpy.instrument import Instrument
import visa
class SR830(Instrument):
'''
Instrument driver for SR830
'''
def __init__(self, gpib_address='', name='SR830'):
self._units = {'amplitude': 'V', 'frequency': 'Hz'}
self._visa_handle = visa.ResourceManager().open_resource(gpib_address)
self._visa_handle.read_termination = '\n'
self.time_constant_options = {
"10 us": 0,
"30 us": 1,
"100 us": 2,
"300 us": 3,
"1 ms": 4,
"3 ms": 5,
"10 ms": 6,
"30 ms": 7,
"100 ms": 8,
"300 ms": 9,
"1 s": 10,
"3 s": 11,
"10 s": 12,
"30 s": 13,
"100 s": 14,
"300 s": 15,
"1 ks": 16,
"3 ks": 17,
"10 ks": 18,
"30 ks": 19
}
self.sensitivity_options = [
2e-9, 5e-9, 10e-9, 20e-9, 50e-9, 100e-9, 200e-9,
500e-9, 1e-6, 2e-6, 5e-6, 10e-6, 20e-6, 50e-6, 100e-6,
200e-6, 500e-6, 1e-3, 2e-3, 5e-3, 10e-3, 20e-3,
50e-3, 100e-3, 200e-3, 500e-3, 1]
super(SR830, self).__init__(name)
@property
def sensitivity(self):
'''Get the lockin sensitivity'''
return self.sensitivity_options[int(self._visa_handle.ask('SENS?'))]
@sensitivity.setter
def sensitivity(self, value):
'''Set the sensitivity'''
self._visa_handle.write('SENS%d' %self.sensitivity_options.index(value))
@property
def amplitude(self):
'''Get the output amplitude'''
return self._visa_handle.ask('SLVL?')
@amplitude.setter
def amplitude(self, value):
'''Set the amplitude.'''
self._visa_handle.write('SLVL %s' %value)
@property
def frequency(self):
return self._visa_handle.ask('FREQ?')
@frequency.setter
def frequency(self, value):
self._visa_handle.write('FREQ %s' %value)
@property
def X(self):
return float(self._visa_handle.ask('OUTP?1'))
@property
def Y(self):
return float(self._visa_handle.ask('OUTP?2'))
@property
def R(self):
return float(self._visa_handle.ask('OUTP?3'))
@property
def theta(self):
return float(self._visa_handle.ask('OUTP?4'))
@property
def time_constant(self):
options = {self.time_constant_options[key]: key for key in self.time_constant_options.keys()}
return options[int(self._visa_handle.ask('OFLT?'))]
@time_constant.setter
def time_constant(self, value):
self._visa_handle.write('OFLT %s' %self.time_constant_options[value])
def __del__(self):
self._visa_handle.close() | [
"visa.ResourceManager"
] | [((271, 293), 'visa.ResourceManager', 'visa.ResourceManager', ([], {}), '()\n', (291, 293), False, 'import visa\n')] |
import os
from logging.config import dictConfig
from typing import Optional
from flask import render_template, request, send_from_directory
from flask_babel import get_locale, lazy_gettext as _
from werkzeug.utils import ImportStringError
from .auth import auth_bp
from .comp import comp_bp
from .ext import babel, csrf, db, login_manager, pages
from .home import home_bp
from .models import User
from .profile import profile_bp
from .utils import pagination
from .utils.app import Application
__all__ = ['create_app']
def create_app(env: Optional[str] = None) -> Application:
flask_environment = os.environ.get('FLASK_ENV', '').lower()
if flask_environment == 'production':
configure_logging()
app = Application()
configure_app(app, env)
configure_extensions(app)
with app.app_context():
configure_blueprints(app)
configure_templates(app)
configure_error_handlers(app)
return app
def configure_app(app: Application, env: Optional[str]):
app.config.from_object('hbcomp.config')
if env is not None:
try:
app.config.from_object(f'hbcomp.config_{env}')
except ImportStringError:
app.logger.info(f'no environment configuration for {env}')
if app.config['DEBUG']:
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static'), 'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def configure_blueprints(app: Application):
app.register_blueprint(home_bp)
app.register_blueprint(profile_bp, url_prefix='/profile')
app.register_blueprint(comp_bp, url_prefix='/comp')
app.register_blueprint(auth_bp, url_prefix='/auth')
def configure_extensions(app: Application):
db.init_app(app)
csrf.init_app(app)
pages.init_app(app)
pages.get('foo')
if not app.testing:
@babel.localeselector
def get_locale():
accept_languages = app.config.get('ACCEPT_LANGUAGES', ['pl', 'en'])
return request.accept_languages.best_match(accept_languages)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
babel.init_app(app)
login_manager.init_app(app)
login_manager.login_view = 'auth.login'
login_manager.login_message = _('Please log in to access this page')
login_manager.login_message_category = 'warning'
def configure_templates(app: Application):
app.jinja_env.globals.update({
'url_for_other_page': pagination.url_for_other_page,
'get_locale': get_locale,
})
def configure_logging():
dictConfig({
'version': 1,
'formatters': {
'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
}
},
'handlers': {
'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default',
}
},
'root': {
'level': 'INFO',
'handlers': ['wsgi'],
},
})
def configure_error_handlers(app: Application):
@app.errorhandler(403)
def forbidden_page(error):
return render_template('errors/403.html'), 403
@app.errorhandler(404)
def page_not_found(error):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def server_error_page(error):
return render_template('errors/500.html'), 500
| [
"flask.render_template",
"logging.config.dictConfig",
"flask.request.accept_languages.best_match",
"os.environ.get",
"os.path.join",
"flask_babel.lazy_gettext"
] | [((2384, 2422), 'flask_babel.lazy_gettext', '_', (['"""Please log in to access this page"""'], {}), "('Please log in to access this page')\n", (2385, 2422), True, 'from flask_babel import get_locale, lazy_gettext as _\n'), ((2689, 3011), 'logging.config.dictConfig', 'dictConfig', (["{'version': 1, 'formatters': {'default': {'format':\n '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'}}, 'handlers':\n {'wsgi': {'class': 'logging.StreamHandler', 'stream':\n 'ext://flask.logging.wsgi_errors_stream', 'formatter': 'default'}},\n 'root': {'level': 'INFO', 'handlers': ['wsgi']}}"], {}), "({'version': 1, 'formatters': {'default': {'format':\n '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'}}, 'handlers':\n {'wsgi': {'class': 'logging.StreamHandler', 'stream':\n 'ext://flask.logging.wsgi_errors_stream', 'formatter': 'default'}},\n 'root': {'level': 'INFO', 'handlers': ['wsgi']}})\n", (2699, 3011), False, 'from logging.config import dictConfig\n'), ((606, 637), 'os.environ.get', 'os.environ.get', (['"""FLASK_ENV"""', '""""""'], {}), "('FLASK_ENV', '')\n", (620, 637), False, 'import os\n'), ((2083, 2136), 'flask.request.accept_languages.best_match', 'request.accept_languages.best_match', (['accept_languages'], {}), '(accept_languages)\n', (2118, 2136), False, 'from flask import render_template, request, send_from_directory\n'), ((3331, 3365), 'flask.render_template', 'render_template', (['"""errors/403.html"""'], {}), "('errors/403.html')\n", (3346, 3365), False, 'from flask import render_template, request, send_from_directory\n'), ((3445, 3479), 'flask.render_template', 'render_template', (['"""errors/404.html"""'], {}), "('errors/404.html')\n", (3460, 3479), False, 'from flask import render_template, request, send_from_directory\n'), ((3562, 3596), 'flask.render_template', 'render_template', (['"""errors/500.html"""'], {}), "('errors/500.html')\n", (3577, 3596), False, 'from flask import render_template, request, send_from_directory\n'), ((1392, 1429), 'os.path.join', 'os.path.join', (['app.root_path', '"""static"""'], {}), "(app.root_path, 'static')\n", (1404, 1429), False, 'import os\n')] |
# -*- coding: utf-8 -*-
##############################################################################
##
## This file is part of Taurus
##
## http://taurus-scada.org
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Taurus is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Taurus is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import sys
import taurus
import unittest
def _import(name):
__import__(name)
return sys.modules[name]
class QtTestCase(unittest.TestCase):
_api_name = None
def setUp(self):
taurus.setLogLevel(taurus.Critical)
self.opt_mods = ("QtDesigner", "QtNetwork", "Qt", "QtSvg",
"QtUiTools", "QtWebKit", "Qwt5", "uic")
# store a "snapshot" of the currently loaded modules
self._orig_mods = set(sys.modules.keys())
# this import initializes Qt in case it is not loaded
from taurus.external.qt import Qt, API_NAME
self._api_name = API_NAME
self.__qt = Qt
def test_qt_base_import(self):
mods = set(sys.modules.keys())
other_apis = set(('PyQt5', 'PySide2', 'PyQt4', 'PySide'))
other_apis.remove(self._api_name)
# the selected API and the QtCore should be loaded
self.assertTrue(self._api_name in mods, self._api_name + " not loaded")
self.assertTrue(self._api_name + ".QtCore" in mods,
"QtCore not loaded")
# the other APIs should *not* be loaded
for other_api in other_apis:
self.assertFalse(
other_api in mods,
other_api + " loaded in " + self._api_name + " test")
# the other Qt submodules should *not* be loaded
for opt_mod in self.opt_mods:
mod = "{0}.{1}".format(self._api_name, opt_mod)
self.assertFalse(mod in mods - self._orig_mods, mod + " is loaded")
def __test_qt_module(self, qt_mod_name):
"""Checks that the given shim is complete"""
taurus_qt_mod_name = "taurus.external.qt.{0}".format(qt_mod_name)
orig_qt_mod_name = "{0}.{1}".format(self._api_name, qt_mod_name)
TaurusQtMod = _import(taurus_qt_mod_name)
OrigQtMod = _import(orig_qt_mod_name)
taurus_qt_mod_members = [m for m in dir(TaurusQtMod)
if not m.startswith("_")]
orig_qt_mod_members = [m for m in dir(OrigQtMod)
if not m.startswith("_")]
for orig_member_name in orig_qt_mod_members:
self.assertTrue(
orig_member_name in taurus_qt_mod_members,
"Taurus {0} does not contain {1}".format(qt_mod_name,
orig_member_name)
)
def test_qt_core(self):
"""Check the QtCore shim"""
return self.__test_qt_module("QtCore")
def test_qt_gui(self):
"""Check the QtGui shim"""
return self.__test_qt_module("QtGui")
def main():
unittest.main(verbosity=2)
if __name__ == "__main__":
main()
| [
"unittest.main",
"taurus.setLogLevel",
"sys.modules.keys"
] | [((3628, 3654), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (3641, 3654), False, 'import unittest\n'), ((1172, 1207), 'taurus.setLogLevel', 'taurus.setLogLevel', (['taurus.Critical'], {}), '(taurus.Critical)\n', (1190, 1207), False, 'import taurus\n'), ((1433, 1451), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (1449, 1451), False, 'import sys\n'), ((1681, 1699), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (1697, 1699), False, 'import sys\n')] |
import os, pickle
import os.path as osp
import numpy as np
import cv2
import scipy.ndimage as nd
import init_path
from lib.dataset.get_dataset import get_dataset
from lib.network.sgan import SGAN
import torch
from torch.utils.data import DataLoader
import argparse
from ipdb import set_trace
import matplotlib.pyplot as plt
from lib.utils import pyutils
classes=['background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--cfg_file", default=None, type=str)
args = parser.parse_args()
args = pyutils.read_yaml2cls(args.cfg_file)
return args
# mean pixel : in B-G-R channel order
mean_pixel = np.array([104.008, 116.669, 122.675])
def preprocess(image, size):
""" pre-process images with Opencv format"""
image = np.array(image)
H, W, _ = image.shape
image = nd.zoom(image.astype('float32'), (size / H, size / W, 1.0), order=1)
image = image - mean_pixel
image = image.transpose([2, 0, 1])
image = np.expand_dims(image, axis=0)
return torch.from_numpy(image)
def generate_seed_with_ignore(localization):
"""
This function generate seed ignoring all the conflicts
:param localization: (41, 41, 21) binary value
:return:
"""
h, w, c = localization.shape
assert (h == 41) & (w == 41) & (c == 21)
# set_trace()
# find conflict index
sum_loc = np.sum(localization, axis=2)
conflict_ind = np.where(sum_loc > 1)
# set conflict position to 0
localization[conflict_ind[0], conflict_ind[1], :] = 0
# generate seed
ind = np.where(localization)
mask = np.ones(shape=(h, w), dtype=np.int) * 21
mask[ind[0], ind[1]] = ind[2]
return mask
def generate_seed_wo_ignore(localization, train_boat=False):
"""
This function generate seed with priority strategy
:param localization:
:return:
"""
h, w, c = localization.shape
assert (h == 41) & (w == 41) & (c == 21)
# generate background seed
mask = np.ones((h, w), dtype=np.int) * 21
bg_ind = np.where(localization[:, :, 0])
mask[bg_ind[0], bg_ind[1]] = 0
# generate foreground seed in the order of their area
area = np.sum(localization, axis=(0, 1))
cls_order = np.argsort(area)[::-1] # area in descending order
for cls in cls_order:
if area[cls] == 0:
break
ind = np.where(localization[:, :, cls])
mask[ind[0], ind[1]] = cls
if train_boat:
train_boat_ind = np.where(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))
mask[train_boat_ind] = 0
return mask
def get_localization_cues_sec(att_maps, saliency, im_label, cam_thresh):
"""get localization cues with method in SEC paper
perform hard thresholding for each foreground class
Parameters
----------
att_maps: [41, 41, 20]
saliency: [H, W]
im_label: list of foreground classes
cam_thresh: hard threshold to extract foreground class cues
Return
------
seg_mask: [41, 41]
"""
h, w = att_maps.shape[:2]
im_h, im_w = saliency.shape[:2]
localization1 = np.zeros(shape=(h, w, 21))
for idx in im_label: # idx: aero=1
heat_map = att_maps[:, :, idx - 1]
localization1[:, :, idx] = heat_map > cam_thresh * np.max(heat_map)
# bg_cue = saliency.astype(np.float32)
# bg_cue = bg_cue / 255
bg_cue = nd.zoom(saliency, (h / im_h, h / im_w), order=1)
localization1[:, :, 0] = bg_cue < 0.06
# handle conflict seed
if args.ignore_conflict:
seg_mask = generate_seed_with_ignore(localization1)
else:
seg_mask = generate_seed_wo_ignore(localization1, train_boat=True)
return seg_mask
def get_localization_cues_dcsp(att_maps, saliency, im_label, bg_thresh):
"""get localization cues with method in DCSP paper
compute harmonic mean for each foreground class
Parameters
----------
att_maps: [41, 41, 20]
saliency: [H, W]
im_label: list of foreground classes
cam_thresh: hard threshold to extract foreground class cues
Return
------
seg_mask: [41, 41]
"""
h, w = att_maps.shape[:2]
im_h, im_w = saliency.shape[:2]
re_sal = nd.zoom(saliency, (h / im_h, w / im_w), order=1)
localization1 = np.zeros(shape=(h, w, 20))
for idx in im_label: # idx: aero=1
localization1[:, :, idx - 1] = 2 / ((1 / (att_maps[:, :, idx - 1] + 1e-7)) + (1 / (re_sal + 1e-7)))
hm_max = np.max(localization1, axis=2)
seg_mask = np.argmax(localization1, axis=2) + 1
seg_mask[hm_max < bg_thresh] = 0
return seg_mask
def filter_weight_dict(weight_dict, model_dict):
# filter the parameters that exist in the pretrained model
pretrained_dict = dict()
for k, v in weight_dict.items():
# keep compatable with the previous version of network definition
if "conv" in k and "backbone" not in k:
k = "backbone." + k
if k in model_dict:
pretrained_dict[k] = v
model_dict.update(pretrained_dict)
return model_dict
if __name__ == '__main__':
args = parse_args()
device = torch.device("cuda:0")
# input and output
im_tags = pickle.load(open(args.cue_file, "rb"))
if not osp.exists(args.res_path):
os.mkdir(args.res_path)
_, test_dataset = get_dataset(args.dataset_name, args)
batch_size = 8
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
# load net and trained weights
model = SGAN(backbone_name=args.backbone)
weight_dict = torch.load(osp.join(args.save_model_path, args.cfg_name, "model_iter_" + str(args.max_iter) + ".pth"))
model_dict = filter_weight_dict(weight_dict, model.state_dict())
model.load_state_dict(model_dict)
model = model.to(device)
model.eval()
save_path = osp.join(args.res_path, args.cfg_name + args.test_cfg)
if not osp.exists(save_path):
os.makedirs(save_path)
# compute class activation map
with torch.no_grad():
for num, pack in enumerate(test_loader):
names, imgs, labels = pack[0], pack[1].to(device, dtype=torch.float32), \
pack[2].numpy()
fg_sim = pack[3].to(device, dtype=torch.float32)
bg_sim = pack[4].to(device, dtype=torch.float32)
sizes = pack[6].to("cpu").numpy()
if args.combine_seedseg:
_, segs, cams = model.forward_cam(imgs, fg_sim, bg_sim)
cams = cams + segs
# cams = segs
else:
_, _, cams = model.forward_cam(imgs, fg_sim, bg_sim)
np_cams = np.transpose(cams.cpu().numpy(), (0, 2, 3, 1))
_, h, w, c = np_cams.shape
for k, name in enumerate(names):
# get output cam
im_label = im_tags[name]
im_h, im_w = sizes[k]
np_cam = np_cams[k]
# get saliency
bg_cue = cv2.imread(osp.join(args.dataset_root, "sal", args.sdnet_path, name + ".png"), cv2.IMREAD_GRAYSCALE)
bg_cue = bg_cue.astype(np.float32)
bg_cue = bg_cue / 255
seg_mask = get_localization_cues_sec(np_cam, bg_cue, im_label, args.cam_thresh)
# save mask
write_mask = nd.zoom(seg_mask, (im_h / h, im_w / w), order=0)
cv2.imwrite(osp.join(save_path, name + ".png"), write_mask)
| [
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"scipy.ndimage.zoom",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.where",
"numpy.max",
"os.mkdir",
"lib.network.sgan.SGAN",
"numpy.ones",
"numpy.argmax",
"torch.device",
"os.makedirs",
"os.path.join",
"numpy.sum",
"numpy.zer... | [((922, 959), 'numpy.array', 'np.array', (['[104.008, 116.669, 122.675]'], {}), '([104.008, 116.669, 122.675])\n', (930, 959), True, 'import numpy as np\n'), ((686, 711), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (709, 711), False, 'import argparse\n'), ((816, 852), 'lib.utils.pyutils.read_yaml2cls', 'pyutils.read_yaml2cls', (['args.cfg_file'], {}), '(args.cfg_file)\n', (837, 852), False, 'from lib.utils import pyutils\n'), ((1051, 1066), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1059, 1066), True, 'import numpy as np\n'), ((1257, 1286), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1271, 1286), True, 'import numpy as np\n'), ((1299, 1322), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1315, 1322), False, 'import torch\n'), ((1647, 1675), 'numpy.sum', 'np.sum', (['localization'], {'axis': '(2)'}), '(localization, axis=2)\n', (1653, 1675), True, 'import numpy as np\n'), ((1695, 1716), 'numpy.where', 'np.where', (['(sum_loc > 1)'], {}), '(sum_loc > 1)\n', (1703, 1716), True, 'import numpy as np\n'), ((1840, 1862), 'numpy.where', 'np.where', (['localization'], {}), '(localization)\n', (1848, 1862), True, 'import numpy as np\n'), ((2306, 2337), 'numpy.where', 'np.where', (['localization[:, :, 0]'], {}), '(localization[:, :, 0])\n', (2314, 2337), True, 'import numpy as np\n'), ((2443, 2476), 'numpy.sum', 'np.sum', (['localization'], {'axis': '(0, 1)'}), '(localization, axis=(0, 1))\n', (2449, 2476), True, 'import numpy as np\n'), ((3372, 3398), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w, 21)'}), '(shape=(h, w, 21))\n', (3380, 3398), True, 'import numpy as np\n'), ((3644, 3692), 'scipy.ndimage.zoom', 'nd.zoom', (['saliency', '(h / im_h, h / im_w)'], {'order': '(1)'}), '(saliency, (h / im_h, h / im_w), order=1)\n', (3651, 3692), True, 'import scipy.ndimage as nd\n'), ((4458, 4506), 'scipy.ndimage.zoom', 'nd.zoom', (['saliency', '(h / im_h, w / im_w)'], {'order': '(1)'}), '(saliency, (h / im_h, w / im_w), order=1)\n', (4465, 4506), True, 'import scipy.ndimage as nd\n'), ((4527, 4553), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w, 20)'}), '(shape=(h, w, 20))\n', (4535, 4553), True, 'import numpy as np\n'), ((4714, 4743), 'numpy.max', 'np.max', (['localization1'], {'axis': '(2)'}), '(localization1, axis=2)\n', (4720, 4743), True, 'import numpy as np\n'), ((5382, 5404), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5394, 5404), False, 'import torch\n'), ((5579, 5615), 'lib.dataset.get_dataset.get_dataset', 'get_dataset', (['args.dataset_name', 'args'], {}), '(args.dataset_name, args)\n', (5590, 5615), False, 'from lib.dataset.get_dataset import get_dataset\n'), ((5654, 5743), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(8)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=8)\n', (5664, 5743), False, 'from torch.utils.data import DataLoader\n'), ((5788, 5821), 'lib.network.sgan.SGAN', 'SGAN', ([], {'backbone_name': 'args.backbone'}), '(backbone_name=args.backbone)\n', (5792, 5821), False, 'from lib.network.sgan import SGAN\n'), ((6114, 6168), 'os.path.join', 'osp.join', (['args.res_path', '(args.cfg_name + args.test_cfg)'], {}), '(args.res_path, args.cfg_name + args.test_cfg)\n', (6122, 6168), True, 'import os.path as osp\n'), ((1874, 1909), 'numpy.ones', 'np.ones', ([], {'shape': '(h, w)', 'dtype': 'np.int'}), '(shape=(h, w), dtype=np.int)\n', (1881, 1909), True, 'import numpy as np\n'), ((2258, 2287), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.int'}), '((h, w), dtype=np.int)\n', (2265, 2287), True, 'import numpy as np\n'), ((2493, 2509), 'numpy.argsort', 'np.argsort', (['area'], {}), '(area)\n', (2503, 2509), True, 'import numpy as np\n'), ((2629, 2662), 'numpy.where', 'np.where', (['localization[:, :, cls]'], {}), '(localization[:, :, cls])\n', (2637, 2662), True, 'import numpy as np\n'), ((2743, 2812), 'numpy.where', 'np.where', (['(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))'], {}), '(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))\n', (2751, 2812), True, 'import numpy as np\n'), ((4759, 4791), 'numpy.argmax', 'np.argmax', (['localization1'], {'axis': '(2)'}), '(localization1, axis=2)\n', (4768, 4791), True, 'import numpy as np\n'), ((5493, 5518), 'os.path.exists', 'osp.exists', (['args.res_path'], {}), '(args.res_path)\n', (5503, 5518), True, 'import os.path as osp\n'), ((5528, 5551), 'os.mkdir', 'os.mkdir', (['args.res_path'], {}), '(args.res_path)\n', (5536, 5551), False, 'import os, pickle\n'), ((6180, 6201), 'os.path.exists', 'osp.exists', (['save_path'], {}), '(save_path)\n', (6190, 6201), True, 'import os.path as osp\n'), ((6211, 6233), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (6222, 6233), False, 'import os, pickle\n'), ((6279, 6294), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6292, 6294), False, 'import torch\n'), ((3541, 3557), 'numpy.max', 'np.max', (['heat_map'], {}), '(heat_map)\n', (3547, 3557), True, 'import numpy as np\n'), ((7637, 7685), 'scipy.ndimage.zoom', 'nd.zoom', (['seg_mask', '(im_h / h, im_w / w)'], {'order': '(0)'}), '(seg_mask, (im_h / h, im_w / w), order=0)\n', (7644, 7685), True, 'import scipy.ndimage as nd\n'), ((7303, 7369), 'os.path.join', 'osp.join', (['args.dataset_root', '"""sal"""', 'args.sdnet_path', "(name + '.png')"], {}), "(args.dataset_root, 'sal', args.sdnet_path, name + '.png')\n", (7311, 7369), True, 'import os.path as osp\n'), ((7714, 7748), 'os.path.join', 'osp.join', (['save_path', "(name + '.png')"], {}), "(save_path, name + '.png')\n", (7722, 7748), True, 'import os.path as osp\n')] |
import os
import shutil
from shapely.geometry import Polygon
from .. import osm_create_maps
from .. import util
TEST_FP = os.path.dirname(os.path.abspath(__file__))
def test_get_width():
assert osm_create_maps.get_width('15.2') == 15
assert osm_create_maps.get_width('') == 0
assert osm_create_maps.get_width("['14.9', '12.2']") == 0
assert osm_create_maps.get_width('t') == 0
def test_get_speed():
assert osm_create_maps.get_speed('') == 0
assert osm_create_maps.get_speed('signals') == 0
assert osm_create_maps.get_speed('60') == 60
assert osm_create_maps.get_speed("['90', '100']") == 100
def test_reproject_and_clean_feats(tmpdir):
tmppath = tmpdir.strpath
shutil.copy(
TEST_FP + '/data/processed/maps/osm_elements.geojson',
tmppath
)
# For now, just make sure it runs
osm_create_maps.clean_ways(
tmppath + '/osm_elements.geojson',
tmppath + '/docs'
)
def test_expand_polygon():
test_polygon = {
'type': 'Polygon',
'coordinates': [[[-71.0770265, 42.3364517], [-71.0810509, 42.3328703],
[-71.0721386, 42.3325241]]]
}
points_file = os.path.join(TEST_FP, 'data', 'osm_crash_file.json')
# Too many points fall outside of the polygon to buffer
result = osm_create_maps.expand_polygon(test_polygon, points_file)
assert result is None
polygon_coords = [util.get_reproject_point(
x[1], x[0], coords=True) for x in test_polygon['coordinates'][0]]
orig_shape = Polygon(polygon_coords)
result = osm_create_maps.expand_polygon(test_polygon, points_file,
max_percent=.7)
result_coords = [util.get_reproject_point(
x[1], x[0], coords=True) for x in result.exterior.coords]
result_shape = Polygon(result_coords)
# Check whether the new polygon has a larger area than the old one
assert result_shape.area > orig_shape.area
records = util.read_records(points_file, 'crash')
# The first two points are outside the original shape
# and the last point is within
assert orig_shape.contains(records[0].point) is False
assert orig_shape.contains(records[1].point) is False
assert orig_shape.contains(records[2].point)
# The first point should be within the new shape, but not the
# second point, since it was too far from the original shape
assert result_shape.contains(records[0].point)
assert result_shape.contains(records[1].point) is False
assert result_shape.contains(records[2].point)
| [
"os.path.abspath",
"shapely.geometry.Polygon",
"os.path.join",
"shutil.copy"
] | [((139, 164), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import os\n'), ((709, 784), 'shutil.copy', 'shutil.copy', (["(TEST_FP + '/data/processed/maps/osm_elements.geojson')", 'tmppath'], {}), "(TEST_FP + '/data/processed/maps/osm_elements.geojson', tmppath)\n", (720, 784), False, 'import shutil\n'), ((1187, 1239), 'os.path.join', 'os.path.join', (['TEST_FP', '"""data"""', '"""osm_crash_file.json"""'], {}), "(TEST_FP, 'data', 'osm_crash_file.json')\n", (1199, 1239), False, 'import os\n'), ((1538, 1561), 'shapely.geometry.Polygon', 'Polygon', (['polygon_coords'], {}), '(polygon_coords)\n', (1545, 1561), False, 'from shapely.geometry import Polygon\n'), ((1827, 1849), 'shapely.geometry.Polygon', 'Polygon', (['result_coords'], {}), '(result_coords)\n', (1834, 1849), False, 'from shapely.geometry import Polygon\n')] |
import unittest
import numpy as np
import tensorflow as tf
from megnet.losses import mean_squared_error_with_scale
class TestLosses(unittest.TestCase):
def test_mse(self):
x = np.array([0.1, 0.2, 0.3])
y = np.array([0.05, 0.15, 0.25])
loss = mean_squared_error_with_scale(x, y, scale=100)
self.assertAlmostEqual(loss.numpy(), np.mean((x - y) ** 2) * 100)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"numpy.array",
"numpy.mean",
"megnet.losses.mean_squared_error_with_scale"
] | [((428, 443), 'unittest.main', 'unittest.main', ([], {}), '()\n', (441, 443), False, 'import unittest\n'), ((192, 217), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (200, 217), True, 'import numpy as np\n'), ((230, 258), 'numpy.array', 'np.array', (['[0.05, 0.15, 0.25]'], {}), '([0.05, 0.15, 0.25])\n', (238, 258), True, 'import numpy as np\n'), ((274, 320), 'megnet.losses.mean_squared_error_with_scale', 'mean_squared_error_with_scale', (['x', 'y'], {'scale': '(100)'}), '(x, y, scale=100)\n', (303, 320), False, 'from megnet.losses import mean_squared_error_with_scale\n'), ((366, 387), 'numpy.mean', 'np.mean', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (373, 387), True, 'import numpy as np\n')] |
import datetime
import requests
import socket
import random
import sys
import time
def now():
a=datetime.fromtimestamp(time.time())
return a.strftime("%H:%M:%S %Y-%m-%d")
def getmyip():
a=requests.get('http://checkip.dyndns.org')
a=a.content
b=a[76:89]
return b
class node:
def __init__(self, listeningport):
timestamp=time.time()
self.listeningport=listeningport
self.timestamp=timestamp
self.hashid=hashlib.sha256(str(timestamp+random.random()*1000000)).hexdigest()
inth=int(self.hashid,16)
self.hashvector=[0]*vectorlength
self.neighbors=[[-1,'',8888]]*max_neighbors #list of 2 element arrays of HASHID, IP ADDRESS, AND THEIR PORT
self.ip=homeip
self.logs=''
r=0
while inth>0:
self.hashvector[r]=int(inth%elementlength)
inth=inth/elementlength
r=r+1
self.sockets=[0]*(max_neighbors+1) #first socket should be SERVER socket
#listening socket
self.sockets[0]=self.create_socket('',self.listeningport)
#self.create_socket('',listeningport,0)
| [
"random.random",
"time.time",
"requests.get"
] | [((202, 243), 'requests.get', 'requests.get', (['"""http://checkip.dyndns.org"""'], {}), "('http://checkip.dyndns.org')\n", (214, 243), False, 'import requests\n'), ((124, 135), 'time.time', 'time.time', ([], {}), '()\n', (133, 135), False, 'import time\n'), ((361, 372), 'time.time', 'time.time', ([], {}), '()\n', (370, 372), False, 'import time\n'), ((496, 511), 'random.random', 'random.random', ([], {}), '()\n', (509, 511), False, 'import random\n')] |
#!/usr/bin/python
import RPi.GPIO as GPIO
from picamera import PiCamera
import time
import datetime
PIN = 12
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN, GPIO.IN)
camera = PiCamera()
camera.rotation = 180
camera.resolution = (1024, 576)
#camera.start_preview()
#sleep(20)
#camera.stop_preview()
while True:
time.sleep(0.5)
#print("waiting...")
if (GPIO.input(PIN)):
print("Taking a picture...")
#camera.start_preview()
time.sleep(0.2)
ts = time.time()
tstr = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
fname = "/home/pi/figs/image_%s.jpg" % (tstr)
camera.capture(fname)
time.sleep(3)
#camera.stop_preview()
else:
print("PIN down...")
| [
"datetime.datetime.fromtimestamp",
"RPi.GPIO.setup",
"picamera.PiCamera",
"time.sleep",
"RPi.GPIO.input",
"time.time",
"RPi.GPIO.setmode"
] | [((113, 135), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (125, 135), True, 'import RPi.GPIO as GPIO\n'), ((136, 160), 'RPi.GPIO.setup', 'GPIO.setup', (['PIN', 'GPIO.IN'], {}), '(PIN, GPIO.IN)\n', (146, 160), True, 'import RPi.GPIO as GPIO\n'), ((171, 181), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (179, 181), False, 'from picamera import PiCamera\n'), ((314, 329), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (324, 329), False, 'import time\n'), ((363, 378), 'RPi.GPIO.input', 'GPIO.input', (['PIN'], {}), '(PIN)\n', (373, 378), True, 'import RPi.GPIO as GPIO\n'), ((467, 482), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (477, 482), False, 'import time\n'), ((505, 516), 'time.time', 'time.time', ([], {}), '()\n', (514, 516), False, 'import time\n'), ((699, 712), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (709, 712), False, 'import time\n'), ((532, 567), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (563, 567), False, 'import datetime\n')] |
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework.test import APITestCase
class BaseAPITest(APITestCase):
def setUp(self, password=None) -> None:
self.user = User(username="John Smith", email="<EMAIL>")
self.user.set_password("<PASSWORD>")
self.user.save()
self.client.force_authenticate(user=self.user)
def user_factory(self, username="peter", email="<EMAIL>", password="<PASSWORD>"):
user = User(username=username, email=email, password=password)
user.save()
return user
class ResetPasswordAPITest(BaseAPITest):
def test_request_password_with_no_settings(self):
# make sure that if no setting, the default password request reset field is the email.
user = self.user_factory()
data = {"email": user.username}
response = self.client.post(reverse("reset-password-request"), data=data)
self.assertEqual(response.status_code, 400)
data = {"email": user.email}
response = self.client.post(reverse("reset-password-request"), data=data)
self.assertEqual(response.status_code, 200)
msg = "A password reset token has been sent to the provided email address"
self.assertEqual(response.data["message"], msg)
def test_request_password_with_django_rest_lookup_field_setting(self):
# Make sure we can still use DJANGO_REST_LOOKUP_FIELD setting for backward compatibility.
settings.DJANGO_REST_LOOKUP_FIELD = "username"
user = self.user_factory()
data = {"email": user.username}
response = self.client.post(reverse("reset-password-request"), data=data)
self.assertEqual(response.status_code, 200)
msg = "A password reset token has been sent to the provided email address"
self.assertEqual(response.data["message"], msg)
def test_request_password_with_django_rest_lookup_fields_setting(self):
# Make sure new users can use DJANGO_REST_LOOKUP_FIELDS setting.
settings.DJANGO_REST_LOOKUP_FIELDS = ["email", "username"]
user = self.user_factory()
data = {"email": user.username}
response = self.client.post(reverse("reset-password-request"), data=data)
self.assertEqual(response.status_code, 200)
msg = "A password reset token has been sent to the provided email address"
self.assertEqual(response.data["message"], msg)
data = {"email": user.email}
response = self.client.post(reverse("reset-password-request"), data=data)
self.assertEqual(response.status_code, 200)
msg = "A password reset token has been sent to the provided email address"
self.assertEqual(response.data["message"], msg)
| [
"django.contrib.auth.models.User",
"django.urls.reverse"
] | [((251, 295), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""John Smith"""', 'email': '"""<EMAIL>"""'}), "(username='John Smith', email='<EMAIL>')\n", (255, 295), False, 'from django.contrib.auth.models import User\n'), ((523, 578), 'django.contrib.auth.models.User', 'User', ([], {'username': 'username', 'email': 'email', 'password': 'password'}), '(username=username, email=email, password=password)\n', (527, 578), False, 'from django.contrib.auth.models import User\n'), ((922, 955), 'django.urls.reverse', 'reverse', (['"""reset-password-request"""'], {}), "('reset-password-request')\n", (929, 955), False, 'from django.urls import reverse\n'), ((1094, 1127), 'django.urls.reverse', 'reverse', (['"""reset-password-request"""'], {}), "('reset-password-request')\n", (1101, 1127), False, 'from django.urls import reverse\n'), ((1672, 1705), 'django.urls.reverse', 'reverse', (['"""reset-password-request"""'], {}), "('reset-password-request')\n", (1679, 1705), False, 'from django.urls import reverse\n'), ((2239, 2272), 'django.urls.reverse', 'reverse', (['"""reset-password-request"""'], {}), "('reset-password-request')\n", (2246, 2272), False, 'from django.urls import reverse\n'), ((2550, 2583), 'django.urls.reverse', 'reverse', (['"""reset-password-request"""'], {}), "('reset-password-request')\n", (2557, 2583), False, 'from django.urls import reverse\n')] |
"""Core pytorch operations regarding optimization (optimize, schedule) are placed in general tests."""
import pytest
import torch
import torchtraining.pytorch as P
def test_backward():
backward = P.Backward()
x = torch.randn(10, requires_grad=True)
y = x ** 2
backward(y.sum())
assert x.grad is not None
| [
"torch.randn",
"torchtraining.pytorch.Backward"
] | [((202, 214), 'torchtraining.pytorch.Backward', 'P.Backward', ([], {}), '()\n', (212, 214), True, 'import torchtraining.pytorch as P\n'), ((223, 258), 'torch.randn', 'torch.randn', (['(10)'], {'requires_grad': '(True)'}), '(10, requires_grad=True)\n', (234, 258), False, 'import torch\n')] |
"""
Emoji extras for Material.
Override the indexes with an extended version that includes short names for Material icons, FontAwesome, etc.
"""
import os
import glob
import copy
import codecs
import inspect
import material
import pymdownx
from pymdownx.emoji import TWEMOJI_SVG_CDN, add_attriubtes
import xml.etree.ElementTree as etree # noqa: N813
OPTION_SUPPORT = pymdownx.__version_info__ >= (7, 1, 0)
RESOURCES = os.path.dirname(inspect.getfile(material))
def _patch_index(options):
"""Patch the given index."""
import pymdownx.twemoji_db as twemoji_db
# Copy the Twemoji index
index = {
"name": 'twemoji',
"emoji": copy.deepcopy(twemoji_db.emoji) if not OPTION_SUPPORT else twemoji_db.emoji,
"aliases": copy.deepcopy(twemoji_db.aliases) if not OPTION_SUPPORT else twemoji_db.aliases
}
icon_locations = options.get('custom_icons', [])
icon_locations.append(os.path.join(RESOURCES, '.icons'))
# Find our icons
for icon_path in icon_locations:
norm_base = icon_path.replace('\\', '/') + '/'
for result in glob.glob(icon_path.replace('\\', '/') + '/**/*.svg', recursive=True):
name = ':{}:'.format(result.replace('\\', '/').replace(norm_base, '', 1).replace('/', '-').lstrip('.')[:-4])
if name not in index['emoji'] and name not in index['aliases']:
# Easiest to just store the path and pull it out from the index
index["emoji"][name] = {'name': name, 'path': result}
return index
if OPTION_SUPPORT: # pragma: no cover
def twemoji(options, md):
"""Provide a copied Twemoji index with additional codes for Material included icons."""
return _patch_index(options)
else: # pragma: no cover
def twemoji():
"""Provide a copied Twemoji index with additional codes for Material included icons."""
return _patch_index({})
def to_svg(index, shortname, alias, uc, alt, title, category, options, md):
"""Return SVG element."""
is_unicode = uc is not None
if is_unicode:
# Handle Twemoji emoji.
svg_path = TWEMOJI_SVG_CDN
attributes = {
"class": options.get('classes', index),
"alt": alt,
"src": "%s%s.svg" % (
options.get('image_path', svg_path),
uc
)
}
if title:
attributes['title'] = title
add_attriubtes(options, attributes)
return etree.Element("img", attributes)
else:
# Handle Material SVG assets.
el = etree.Element('span', {"class": options.get('classes', index)})
svg_path = md.inlinePatterns['emoji'].emoji_index['emoji'][shortname]['path']
with codecs.open(svg_path, 'r', encoding='utf-8') as f:
el.text = md.htmlStash.store(f.read())
return el
| [
"pymdownx.emoji.add_attriubtes",
"os.path.join",
"inspect.getfile",
"xml.etree.ElementTree.Element",
"copy.deepcopy",
"codecs.open"
] | [((437, 462), 'inspect.getfile', 'inspect.getfile', (['material'], {}), '(material)\n', (452, 462), False, 'import inspect\n'), ((922, 955), 'os.path.join', 'os.path.join', (['RESOURCES', '""".icons"""'], {}), "(RESOURCES, '.icons')\n", (934, 955), False, 'import os\n'), ((2434, 2469), 'pymdownx.emoji.add_attriubtes', 'add_attriubtes', (['options', 'attributes'], {}), '(options, attributes)\n', (2448, 2469), False, 'from pymdownx.emoji import TWEMOJI_SVG_CDN, add_attriubtes\n'), ((2486, 2518), 'xml.etree.ElementTree.Element', 'etree.Element', (['"""img"""', 'attributes'], {}), "('img', attributes)\n", (2499, 2518), True, 'import xml.etree.ElementTree as etree\n'), ((660, 691), 'copy.deepcopy', 'copy.deepcopy', (['twemoji_db.emoji'], {}), '(twemoji_db.emoji)\n', (673, 691), False, 'import copy\n'), ((756, 789), 'copy.deepcopy', 'copy.deepcopy', (['twemoji_db.aliases'], {}), '(twemoji_db.aliases)\n', (769, 789), False, 'import copy\n'), ((2743, 2787), 'codecs.open', 'codecs.open', (['svg_path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(svg_path, 'r', encoding='utf-8')\n", (2754, 2787), False, 'import codecs\n')] |
import pigpio
import time
class OdomDist(object):
"""
Take a tick input from odometry and compute the distance travelled
"""
def __init__(self, mm_per_tick, debug=False):
self.mm_per_tick = mm_per_tick
self.m_per_tick = mm_per_tick / 1000.0
self.meters = 0
self.last_time = time.time()
self.meters_per_second = 0
self.debug = debug
self.prev_ticks = 0
self.distance = 0
self.prev_distance = 0
self.ave_velocity = []
for i in range(10):
self.ave_velocity.append(0)
def run(self, ticks, throttle):
"""
inputs => total ticks since start
inputs => throttle, used to determine positive or negative vel
return => total dist (m), current vel (m/s), delta dist (m)
"""
#save off the last time interval and reset the timer
start_time = self.last_time
end_time = time.time()
self.last_time = end_time
#calculate elapsed time and distance traveled
seconds = end_time - start_time
self.distance = ticks * self.m_per_tick #converted to meters here
# if throttle < 0.0:
# print("throttle is negative")
# self.distance = self.distance * -1.0
delta_distance = self.distance - self.prev_distance
instant_velocity = delta_distance/seconds
for i in range(9): # do a moving average over a 1/2 second window (10 readings of a 20Hz feed)
self.ave_velocity[9-i] = self.ave_velocity[8-i] # move the time window down one
self.ave_velocity[0] = instant_velocity # stick the latest reading at the start
velocity = sum(self.ave_velocity)/10 # moving average
#update the odometer values
self.meters += delta_distance
self.meters_per_second = velocity
self.prev_distance = self.distance
#console output for debugging
if(self.debug):
print('distance (m):', round(self.meters,3))
print('velocity (m/s):', round(self.meters_per_second,3))
return self.meters, self.meters_per_second, self.distance
class PiPGIOEncoder():
def __init__(self, pin, pi):
self.pin = pin
self.pi = pi
self.pi.set_mode(pin, pigpio.INPUT)
self.pi.set_pull_up_down(pin, pigpio.PUD_UP)
self.cb = pi.callback(self.pin, pigpio.FALLING_EDGE, self._cb)
self.count = 0
def _cb(self, pin, level, tick):
self.count += 1
def run(self):
return self.count
def shutdown(self):
if self.cb != None:
self.cb.cancel()
self.cb = None
self.pi.stop()
if __name__ == "__main__":
pi = pigpio.pi()
e = PiPGIOEncoder(4, pi)
while True:
time.sleep(0.1)
e.run()
| [
"pigpio.pi",
"time.sleep",
"time.time"
] | [((2735, 2746), 'pigpio.pi', 'pigpio.pi', ([], {}), '()\n', (2744, 2746), False, 'import pigpio\n'), ((323, 334), 'time.time', 'time.time', ([], {}), '()\n', (332, 334), False, 'import time\n'), ((941, 952), 'time.time', 'time.time', ([], {}), '()\n', (950, 952), False, 'import time\n'), ((2800, 2815), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2810, 2815), False, 'import time\n')] |
# coding: utf-8
import pprint
import re
import six
class ActionSmnForwarding:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'region_name': 'str',
'project_id': 'str',
'theme_name': 'str',
'topic_urn': 'str',
'message_content': 'str',
'message_title': 'str'
}
attribute_map = {
'region_name': 'region_name',
'project_id': 'project_id',
'theme_name': 'theme_name',
'topic_urn': 'topic_urn',
'message_content': 'message_content',
'message_title': 'message_title'
}
def __init__(self, region_name=None, project_id=None, theme_name=None, topic_urn=None, message_content=None, message_title=None):
"""ActionSmnForwarding - a model defined in huaweicloud sdk"""
self._region_name = None
self._project_id = None
self._theme_name = None
self._topic_urn = None
self._message_content = None
self._message_title = None
self.discriminator = None
self.region_name = region_name
self.project_id = project_id
self.theme_name = theme_name
self.topic_urn = topic_urn
self.message_content = message_content
self.message_title = message_title
@property
def region_name(self):
"""Gets the region_name of this ActionSmnForwarding.
SMN服务对应的region区域
:return: The region_name of this ActionSmnForwarding.
:rtype: str
"""
return self._region_name
@region_name.setter
def region_name(self, region_name):
"""Sets the region_name of this ActionSmnForwarding.
SMN服务对应的region区域
:param region_name: The region_name of this ActionSmnForwarding.
:type: str
"""
self._region_name = region_name
@property
def project_id(self):
"""Gets the project_id of this ActionSmnForwarding.
SMN服务对应的projectId信息
:return: The project_id of this ActionSmnForwarding.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this ActionSmnForwarding.
SMN服务对应的projectId信息
:param project_id: The project_id of this ActionSmnForwarding.
:type: str
"""
self._project_id = project_id
@property
def theme_name(self):
"""Gets the theme_name of this ActionSmnForwarding.
SMN服务对应的主题名称
:return: The theme_name of this ActionSmnForwarding.
:rtype: str
"""
return self._theme_name
@theme_name.setter
def theme_name(self, theme_name):
"""Sets the theme_name of this ActionSmnForwarding.
SMN服务对应的主题名称
:param theme_name: The theme_name of this ActionSmnForwarding.
:type: str
"""
self._theme_name = theme_name
@property
def topic_urn(self):
"""Gets the topic_urn of this ActionSmnForwarding.
SMN服务对应的topic的主题URN
:return: The topic_urn of this ActionSmnForwarding.
:rtype: str
"""
return self._topic_urn
@topic_urn.setter
def topic_urn(self, topic_urn):
"""Sets the topic_urn of this ActionSmnForwarding.
SMN服务对应的topic的主题URN
:param topic_urn: The topic_urn of this ActionSmnForwarding.
:type: str
"""
self._topic_urn = topic_urn
@property
def message_content(self):
"""Gets the message_content of this ActionSmnForwarding.
短信或邮件的内容。
:return: The message_content of this ActionSmnForwarding.
:rtype: str
"""
return self._message_content
@message_content.setter
def message_content(self, message_content):
"""Sets the message_content of this ActionSmnForwarding.
短信或邮件的内容。
:param message_content: The message_content of this ActionSmnForwarding.
:type: str
"""
self._message_content = message_content
@property
def message_title(self):
"""Gets the message_title of this ActionSmnForwarding.
短信或邮件的主题。
:return: The message_title of this ActionSmnForwarding.
:rtype: str
"""
return self._message_title
@message_title.setter
def message_title(self, message_title):
"""Sets the message_title of this ActionSmnForwarding.
短信或邮件的主题。
:param message_title: The message_title of this ActionSmnForwarding.
:type: str
"""
self._message_title = message_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ActionSmnForwarding):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"six.iteritems"
] | [((4995, 5028), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (5008, 5028), False, 'import six\n')] |
"""Contains code that stitches together different parts of the library. By containing most side effects here the rest
of the code can be more deterministic and testable.
This code should not be unit tested.
"""
import os
from pathlib import Path
from typing import Union, List, Tuple, Dict, Optional
import yaml
from pydantic import ValidationError
from typhoon.core.components import Component
from typhoon.core.dags import DAGDefinitionV2, add_yaml_constructors
from typhoon.core.settings import Settings
from typing_extensions import Literal
from typhoon.introspection.introspect_extensions import get_typhoon_extensions_info
def transpile_dag_and_store(dag: dict, output_folder_path: Union[str, Path], debug_mode: bool):
from typhoon.core.transpiler.dag_transpiler import DagFile
from typhoon.core.transpiler.task_transpiler import TasksFile
output_folder_path = Path(output_folder_path)
output_folder_path.mkdir(parents=True, exist_ok=True)
dag = DAGDefinitionV2.parse_obj(dag)
dag_code = DagFile(dag, debug_mode=debug_mode).render()
(output_folder_path / f'{dag.name}.py').write_text(dag_code)
tasks_code = TasksFile(dag.tasks).render()
(output_folder_path / 'tasks.py').write_text(tasks_code)
def load_dag_definitions(ignore_errors: bool = False) -> List[Tuple[DAGDefinitionV2, Path]]:
add_yaml_constructors()
dags = []
for dag_file in Settings.dags_directory.rglob('*.yml'):
try:
dag = DAGDefinitionV2.parse_obj(
yaml.load(dag_file.read_text(), yaml.FullLoader)
)
except ValidationError:
if ignore_errors:
continue
else:
raise
dags.append((dag, dag_file))
return dags
def load_dag_definition(dag_name: str, ignore_errors: bool = False) -> Optional[DAGDefinitionV2]:
dags = load_dag_definitions(ignore_errors)
matching_dags = [dag for dag, _ in dags if dag.name == dag_name]
assert len(matching_dags) <= 1, f'Found {len(matching_dags)} dags with name "{dag_name}"'
return matching_dags[0] if len(matching_dags) == 1 else None
def get_dag_errors() -> Dict[str, List[dict]]:
add_yaml_constructors()
result = {}
for dag_file in Settings.dags_directory.rglob('*.yml'):
try:
DAGDefinitionV2.parse_obj(
yaml.load(dag_file.read_text(), yaml.FullLoader)
)
except ValidationError as e:
result[dag_file.name.split('.yml')[0]] = e.errors()
return result
def get_dags_contents(dags_directory: Union[str, Path]) -> List[str]:
dags_directory = Path(dags_directory)
dags = []
for dag_file in dags_directory.rglob('*.yml'):
dags.append(dag_file.read_text())
return dags
def get_dag_filenames():
dag_files = filter(lambda x: x.endswith('.yml'), os.listdir(str(Settings.dags_directory)))
return dag_files
def load_component(
component_name: str,
ignore_errors: bool = False,
kind: Literal['typhoon', 'custom', 'all'] = 'all',
) -> Optional[Component]:
assert kind in ['typhoon', 'custom', 'all'], f'Kind should be one of ["typhoon", "custom", "all"]. Found: {kind}'
components = load_components(ignore_errors, kind)
matching_components = [(component, code) for component, code in components if component.name == component_name]
assert len(matching_components) <= 1, f'Found {len(matching_components)} components with name "{component_name}"'
return matching_components[0][0] if len(matching_components) == 1 else None
def load_components(
ignore_errors: bool = False,
kind: Literal['typhoon', 'custom', 'all'] = 'all',
) -> List[Tuple[Component, str]]:
if kind == 'all':
return [(c, cs) for c, cs in load_component_definitions(ignore_errors, kind='typhoon')] + \
[(c, cs) for c, cs in load_component_definitions(ignore_errors, kind='custom')]
else:
return [(c, cs) for c, cs in load_component_definitions(ignore_errors, kind)]
def load_component_definitions(
ignore_errors,
kind=Literal['typhoon', 'custom'],
) -> List[Tuple[Component, str]]:
add_yaml_constructors()
if kind == 'custom':
component_files = list(Settings.components_directory.rglob('*.yml'))
else:
component_files = [Path(x) for x in get_typhoon_extensions_info()['components'].values()]
components = []
for component_file in component_files:
if ignore_errors:
try:
comp = Component.parse_obj(yaml.load(component_file.read_text(), yaml.FullLoader))
except ValidationError:
continue
else:
comp = Component.parse_obj(yaml.load(component_file.read_text(), yaml.FullLoader))
components.append((comp, component_file.read_text()))
return components
| [
"typhoon.introspection.introspect_extensions.get_typhoon_extensions_info",
"pathlib.Path",
"typhoon.core.dags.add_yaml_constructors",
"typhoon.core.settings.Settings.dags_directory.rglob",
"typhoon.core.dags.DAGDefinitionV2.parse_obj",
"typhoon.core.transpiler.task_transpiler.TasksFile",
"typhoon.core.t... | [((885, 909), 'pathlib.Path', 'Path', (['output_folder_path'], {}), '(output_folder_path)\n', (889, 909), False, 'from pathlib import Path\n'), ((978, 1008), 'typhoon.core.dags.DAGDefinitionV2.parse_obj', 'DAGDefinitionV2.parse_obj', (['dag'], {}), '(dag)\n', (1003, 1008), False, 'from typhoon.core.dags import DAGDefinitionV2, add_yaml_constructors\n'), ((1341, 1364), 'typhoon.core.dags.add_yaml_constructors', 'add_yaml_constructors', ([], {}), '()\n', (1362, 1364), False, 'from typhoon.core.dags import DAGDefinitionV2, add_yaml_constructors\n'), ((1399, 1437), 'typhoon.core.settings.Settings.dags_directory.rglob', 'Settings.dags_directory.rglob', (['"""*.yml"""'], {}), "('*.yml')\n", (1428, 1437), False, 'from typhoon.core.settings import Settings\n'), ((2185, 2208), 'typhoon.core.dags.add_yaml_constructors', 'add_yaml_constructors', ([], {}), '()\n', (2206, 2208), False, 'from typhoon.core.dags import DAGDefinitionV2, add_yaml_constructors\n'), ((2245, 2283), 'typhoon.core.settings.Settings.dags_directory.rglob', 'Settings.dags_directory.rglob', (['"""*.yml"""'], {}), "('*.yml')\n", (2274, 2283), False, 'from typhoon.core.settings import Settings\n'), ((2629, 2649), 'pathlib.Path', 'Path', (['dags_directory'], {}), '(dags_directory)\n', (2633, 2649), False, 'from pathlib import Path\n'), ((4181, 4204), 'typhoon.core.dags.add_yaml_constructors', 'add_yaml_constructors', ([], {}), '()\n', (4202, 4204), False, 'from typhoon.core.dags import DAGDefinitionV2, add_yaml_constructors\n'), ((1024, 1059), 'typhoon.core.transpiler.dag_transpiler.DagFile', 'DagFile', (['dag'], {'debug_mode': 'debug_mode'}), '(dag, debug_mode=debug_mode)\n', (1031, 1059), False, 'from typhoon.core.transpiler.dag_transpiler import DagFile\n'), ((1151, 1171), 'typhoon.core.transpiler.task_transpiler.TasksFile', 'TasksFile', (['dag.tasks'], {}), '(dag.tasks)\n', (1160, 1171), False, 'from typhoon.core.transpiler.task_transpiler import TasksFile\n'), ((4261, 4305), 'typhoon.core.settings.Settings.components_directory.rglob', 'Settings.components_directory.rglob', (['"""*.yml"""'], {}), "('*.yml')\n", (4296, 4305), False, 'from typhoon.core.settings import Settings\n'), ((4344, 4351), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (4348, 4351), False, 'from pathlib import Path\n'), ((4361, 4390), 'typhoon.introspection.introspect_extensions.get_typhoon_extensions_info', 'get_typhoon_extensions_info', ([], {}), '()\n', (4388, 4390), False, 'from typhoon.introspection.introspect_extensions import get_typhoon_extensions_info\n')] |
import os
import csv
import glob
import numpy as np
import pandas as pd
import nltk
import string
import re
from numpy import genfromtxt
from nltk import *
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
from nltk import word_tokenize
from nltk.util import ngrams
from collections import Counter
def statementDate(elem):
return elem[0]
def createRateMoves(pathToStatements,pathToMinutes,pathToCSV):
actionDF = pd.DataFrame()
targetRateHistDF = pd.DataFrame()
dailyRates = pd.read_csv(pathToCSV,dtype=object)
priorRate = 0
actionFlag = 0
previousDayValue = 0
direction = 'unchg'
for index,row in dailyRates.iterrows():
if(row['date'] > '20170101' and index < (len(dailyRates)-1)):
row['DFEDTAR'] = dailyRates.iloc[index+1,1]
#row['DFEDTAR'] = dailyRates[dailyRates['DFEDTAR']][index+1]
chg = float(row['DFEDTAR']) - float(priorRate)
if(chg>0):
direction='raise'
actionFlag = 1
elif(chg<0):
direction='lower'
actionFlag=1
else:
direction='unchg'
actionFlag=0
targetRateHistDF = targetRateHistDF.append({"Date":row['date'],"MinutesRelease":"","PriorRate": priorRate,"Rate":row['DFEDTAR'],"Direction":direction,"ActionFlag":int(actionFlag),"Change":chg},ignore_index=True)
priorRate = row['DFEDTAR']
for file in list(glob.glob(pathToStatements+'*.txt')):
actionDF = actionDF.append({"Date":str(file).split('/')[3].split('.')[0]},ignore_index=True)
targetRateHistDF = targetRateHistDF[['Date','MinutesRelease','PriorRate','Rate','Direction','ActionFlag','Change']]
#print(actionDF.loc[actionDF["Date"] == "20010103","Rate"])
actionDF = actionDF.sort_values(by=['Date'])
actionDF.index = pd.RangeIndex(len(actionDF.index))
targetRateHistDF = targetRateHistDF[targetRateHistDF['Date'].isin(actionDF['Date'].tolist())]
targetRateHistDF.index = pd.RangeIndex(len(targetRateHistDF.index))
# print(targetRateHistDF)
dateArray = []
for file in list(glob.glob(pathToMinutes+'*.txt')):
fileString = str(file).split('/')[3].split('.')[0].split('_')
dateArray.append([fileString[0],fileString[3]])
dateArray.sort(key=statementDate,reverse=True)
# print(dateArray)
for i in range(len(targetRateHistDF)):
meetingDate = targetRateHistDF.iloc[i,0]
for j in range(len(dateArray)):
if(meetingDate>dateArray[j][0]):
targetRateHistDF.iloc[i,1] = dateArray[j-1][1]
break
targetRateHistDF.iloc[0,1] = '20000323'
# print(targetRateHistDF)
targetRateHistDF.to_csv('../text/history/RatesDecision.csv',header=False, index=False, sep=',')
def main():
path = '../text/history/dailyRateHistory.csv'
pathTwo = '../text/statements/'
pathThree = '../text/minutes/'
createRateMoves(pathTwo,pathThree,path)
if __name__ == '__main__':
main()
| [
"pandas.DataFrame",
"glob.glob",
"pandas.read_csv"
] | [((433, 447), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (445, 447), True, 'import pandas as pd\n'), ((469, 483), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (481, 483), True, 'import pandas as pd\n'), ((499, 535), 'pandas.read_csv', 'pd.read_csv', (['pathToCSV'], {'dtype': 'object'}), '(pathToCSV, dtype=object)\n', (510, 535), True, 'import pandas as pd\n'), ((1341, 1378), 'glob.glob', 'glob.glob', (["(pathToStatements + '*.txt')"], {}), "(pathToStatements + '*.txt')\n", (1350, 1378), False, 'import glob\n'), ((1994, 2028), 'glob.glob', 'glob.glob', (["(pathToMinutes + '*.txt')"], {}), "(pathToMinutes + '*.txt')\n", (2003, 2028), False, 'import glob\n')] |
import matplotlib.pyplot as plt
import numpy as np
def count_harmonic_numbers(n: int):
count = 0
for i in range(1, n+1): # 1 ~ N まで
for _ in range(i, n+1, i): # N以下の i の倍数
count += 1
return count
x = np.linspace(1, 10**5, 100, dtype='int')
y = list(map(lambda x: count_harmonic_numbers(x), x))
y2 = x * np.log(x)
print(y)
print(y2)
plt.plot(x, y, label="count")
plt.plot(x, y2, label="NlogN")
plt.plot(x, x, label="N")
plt.legend()
plt.show()
| [
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((238, 279), 'numpy.linspace', 'np.linspace', (['(1)', '(10 ** 5)', '(100)'], {'dtype': '"""int"""'}), "(1, 10 ** 5, 100, dtype='int')\n", (249, 279), True, 'import numpy as np\n'), ((370, 399), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""count"""'}), "(x, y, label='count')\n", (378, 399), True, 'import matplotlib.pyplot as plt\n'), ((400, 430), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'label': '"""NlogN"""'}), "(x, y2, label='NlogN')\n", (408, 430), True, 'import matplotlib.pyplot as plt\n'), ((431, 456), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'label': '"""N"""'}), "(x, x, label='N')\n", (439, 456), True, 'import matplotlib.pyplot as plt\n'), ((457, 469), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (467, 469), True, 'import matplotlib.pyplot as plt\n'), ((470, 480), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (478, 480), True, 'import matplotlib.pyplot as plt\n'), ((341, 350), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (347, 350), True, 'import numpy as np\n')] |
import pathlib
import numpy as np
def create_submission(path: pathlib.Path, predictions):
pred_with_id = np.stack([np.arange(len(predictions)), predictions], axis=1)
np.savetxt(
fname=path,
X=pred_with_id,
fmt="%d",
delimiter=",",
header="id,label",
comments="",
)
| [
"numpy.savetxt"
] | [((177, 277), 'numpy.savetxt', 'np.savetxt', ([], {'fname': 'path', 'X': 'pred_with_id', 'fmt': '"""%d"""', 'delimiter': '""","""', 'header': '"""id,label"""', 'comments': '""""""'}), "(fname=path, X=pred_with_id, fmt='%d', delimiter=',', header=\n 'id,label', comments='')\n", (187, 277), True, 'import numpy as np\n')] |
import json
import urllib
import utils as ut
from distutils.util import strtobool
class Call(object):
"""docstring for Call"""
def __init__(self, currentStrike, currentPrice, currentProbOTM, currentIV, currentITM):
self.currentStrike = currentStrike
self.currentPrice = currentPrice
self.currentProbOTM = currentProbOTM
self.currentIV = currentIV
self.currentITM = currentITM
class Put(object):
"""docstring for Put"""
def __init__(self, currentStrike, currentPrice, currentProbOTM, currentIV, currentITM):
self.currentStrike = currentStrike
self.currentPrice = currentPrice
self.currentProbOTM = currentProbOTM
self.currentIV = currentIV
self.currentITM = currentITM
def parseOptionsChain(ticker, expDate):
urlTicker = 'https://query2.finance.yahoo.com/v7/finance/options/' + ticker
urlDate = '?date=' + str(ut.formatExpiryURL(expDate))
url = urlTicker + urlDate
rawData = urllib.urlopen(url).read()
parsedTickerData = json.loads(rawData)
optionsJson = parsedTickerData['optionChain']['result'][0]
optionsQuote = optionsJson['quote']
strikes = optionsJson['strikes']
currentStockPrice = float(optionsQuote['regularMarketPrice'])
truncatedStrikes = ut.truncateStrikes(strikes, currentStockPrice)
return (optionsJson, truncatedStrikes, currentStockPrice)
def formatOptionChain(optionsJson, truncatedStrikes, currentStockPrice):
chainCalls = optionsJson['options'][0]['calls'] #append[i] to iterate through calls
chainPuts = optionsJson['options'][0]['puts'] #append[i] to iterate through puts
outputCalls, outputPuts = [],[]
for i in range(len(chainCalls)):
currentStrike = (chainCalls[i]['strike'])
if currentStrike in truncatedStrikes:
currentPrice = float(chainCalls[i]['lastPrice'])
currentIV = float(chainCalls[i]['impliedVolatility'])
currentITM = str(bool(strtobool(str(chainCalls[i]['inTheMoney']))))
currentProbOTM = (1 - ut.calculateDelta(currentPrice, currentStockPrice))
outputCalls.append(Call(currentStrike, currentPrice, currentProbOTM, currentIV, currentITM))
for i in range(len(chainPuts)):
currentStrike = (chainPuts[i]['strike'])
if currentStrike in truncatedStrikes:
currentPrice = float(chainPuts[i]['lastPrice'])
currentIV = float(chainPuts[i]['impliedVolatility'])
currentITM = str(bool(strtobool(str(chainPuts[i]['inTheMoney']))))
currentProbOTM = (1 - ut.calculateDelta(currentPrice, currentStockPrice))
outputPuts.append(Put(currentStrike, currentPrice, currentProbOTM, currentIV, currentITM))
return (outputCalls, outputPuts)
| [
"json.loads",
"utils.formatExpiryURL",
"utils.calculateDelta",
"utils.truncateStrikes",
"urllib.urlopen"
] | [((961, 980), 'json.loads', 'json.loads', (['rawData'], {}), '(rawData)\n', (971, 980), False, 'import json\n'), ((1196, 1242), 'utils.truncateStrikes', 'ut.truncateStrikes', (['strikes', 'currentStockPrice'], {}), '(strikes, currentStockPrice)\n', (1214, 1242), True, 'import utils as ut\n'), ((846, 873), 'utils.formatExpiryURL', 'ut.formatExpiryURL', (['expDate'], {}), '(expDate)\n', (864, 873), True, 'import utils as ut\n'), ((914, 933), 'urllib.urlopen', 'urllib.urlopen', (['url'], {}), '(url)\n', (928, 933), False, 'import urllib\n'), ((1909, 1959), 'utils.calculateDelta', 'ut.calculateDelta', (['currentPrice', 'currentStockPrice'], {}), '(currentPrice, currentStockPrice)\n', (1926, 1959), True, 'import utils as ut\n'), ((2383, 2433), 'utils.calculateDelta', 'ut.calculateDelta', (['currentPrice', 'currentStockPrice'], {}), '(currentPrice, currentStockPrice)\n', (2400, 2433), True, 'import utils as ut\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 20 17:12:53 2014
author: <NAME>
"""
import numpy as np
from statsmodels.regression.linear_model import OLS, WLS
from statsmodels.sandbox.regression.predstd import wls_prediction_std
def test_predict_se():
# this test doesn't use reference values
# checks conistency across options, and compares to direct calculation
# generate dataset
nsample = 50
x1 = np.linspace(0, 20, nsample)
x = np.c_[x1, (x1 - 5)**2, np.ones(nsample)]
np.random.seed(0)#9876789) #9876543)
beta = [0.5, -0.01, 5.]
y_true2 = np.dot(x, beta)
w = np.ones(nsample)
w[nsample * 6. / 10:] = 3
sig = 0.5
y2 = y_true2 + sig * w * np.random.normal(size=nsample)
x2 = x[:,[0,2]]
# estimate OLS
res2 = OLS(y2, x2).fit()
#direct calculation
covb = res2.cov_params()
predvar = res2.mse_resid + (x2 * np.dot(covb, x2.T).T).sum(1)
predstd = np.sqrt(predvar)
prstd, iv_l, iv_u = wls_prediction_std(res2)
np.testing.assert_almost_equal(prstd, predstd, 15)
#stats.t.isf(0.05/2., 50 - 2)
q = 2.0106347546964458
ci_half = q * predstd
np.testing.assert_allclose(iv_u, res2.fittedvalues + ci_half, rtol=1e-12)
np.testing.assert_allclose(iv_l, res2.fittedvalues - ci_half, rtol=1e-12)
prstd, iv_l, iv_u = wls_prediction_std(res2, x2[:3,:])
np.testing.assert_equal(prstd, prstd[:3])
np.testing.assert_allclose(iv_u, res2.fittedvalues[:3] + ci_half[:3],
rtol=1e-12)
np.testing.assert_allclose(iv_l, res2.fittedvalues[:3] - ci_half[:3],
rtol=1e-12)
# check WLS
res3 = WLS(y2, x2, 1. / w).fit()
#direct calculation
covb = res3.cov_params()
predvar = res3.mse_resid * w + (x2 * np.dot(covb, x2.T).T).sum(1)
predstd = np.sqrt(predvar)
prstd, iv_l, iv_u = wls_prediction_std(res3)
np.testing.assert_almost_equal(prstd, predstd, 15)
#stats.t.isf(0.05/2., 50 - 2)
q = 2.0106347546964458
ci_half = q * predstd
np.testing.assert_allclose(iv_u, res3.fittedvalues + ci_half, rtol=1e-12)
np.testing.assert_allclose(iv_l, res3.fittedvalues - ci_half, rtol=1e-12)
# testing shapes of exog
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1:,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-1])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-1])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-2:,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-2:])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-2:,:], weights=[3, 3])
np.testing.assert_equal(prstd, prstd[-2:])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[:3,:])
np.testing.assert_equal(prstd, prstd[:3])
np.testing.assert_allclose(iv_u, res3.fittedvalues[:3] + ci_half[:3],
rtol=1e-12)
np.testing.assert_allclose(iv_l, res3.fittedvalues[:3] - ci_half[:3],
rtol=1e-12)
#use wrong size for exog
#prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1,0], weights=3.)
np.testing.assert_raises(ValueError, wls_prediction_std, res3, x2[-1,0],
weights=3.)
# check some weight values
sew1 = wls_prediction_std(res3, x2[-3:,:])[0]**2
for wv in np.linspace(0.5, 3, 5):
sew = wls_prediction_std(res3, x2[-3:,:], weights=1. / wv)[0]**2
np.testing.assert_allclose(sew, sew1 + res3.scale * (wv - 1))
| [
"numpy.random.normal",
"statsmodels.sandbox.regression.predstd.wls_prediction_std",
"numpy.sqrt",
"numpy.ones",
"numpy.testing.assert_equal",
"statsmodels.regression.linear_model.WLS",
"numpy.testing.assert_allclose",
"numpy.testing.assert_raises",
"numpy.testing.assert_almost_equal",
"numpy.dot",... | [((427, 454), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', 'nsample'], {}), '(0, 20, nsample)\n', (438, 454), True, 'import numpy as np\n'), ((508, 525), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (522, 525), True, 'import numpy as np\n'), ((587, 602), 'numpy.dot', 'np.dot', (['x', 'beta'], {}), '(x, beta)\n', (593, 602), True, 'import numpy as np\n'), ((611, 627), 'numpy.ones', 'np.ones', (['nsample'], {}), '(nsample)\n', (618, 627), True, 'import numpy as np\n'), ((935, 951), 'numpy.sqrt', 'np.sqrt', (['predvar'], {}), '(predvar)\n', (942, 951), True, 'import numpy as np\n'), ((977, 1001), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res2'], {}), '(res2)\n', (995, 1001), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((1006, 1056), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['prstd', 'predstd', '(15)'], {}), '(prstd, predstd, 15)\n', (1036, 1056), True, 'import numpy as np\n'), ((1149, 1222), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res2.fittedvalues + ci_half)'], {'rtol': '(1e-12)'}), '(iv_u, res2.fittedvalues + ci_half, rtol=1e-12)\n', (1175, 1222), True, 'import numpy as np\n'), ((1227, 1300), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res2.fittedvalues - ci_half)'], {'rtol': '(1e-12)'}), '(iv_l, res2.fittedvalues - ci_half, rtol=1e-12)\n', (1253, 1300), True, 'import numpy as np\n'), ((1326, 1361), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res2', 'x2[:3, :]'], {}), '(res2, x2[:3, :])\n', (1344, 1361), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((1365, 1406), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[:3]'], {}), '(prstd, prstd[:3])\n', (1388, 1406), True, 'import numpy as np\n'), ((1411, 1497), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res2.fittedvalues[:3] + ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_u, res2.fittedvalues[:3] + ci_half[:3], rtol=\n 1e-12)\n', (1437, 1497), True, 'import numpy as np\n'), ((1528, 1614), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res2.fittedvalues[:3] - ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_l, res2.fittedvalues[:3] - ci_half[:3], rtol=\n 1e-12)\n', (1554, 1614), True, 'import numpy as np\n'), ((1834, 1850), 'numpy.sqrt', 'np.sqrt', (['predvar'], {}), '(predvar)\n', (1841, 1850), True, 'import numpy as np\n'), ((1876, 1900), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3'], {}), '(res3)\n', (1894, 1900), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((1905, 1955), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['prstd', 'predstd', '(15)'], {}), '(prstd, predstd, 15)\n', (1935, 1955), True, 'import numpy as np\n'), ((2048, 2121), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res3.fittedvalues + ci_half)'], {'rtol': '(1e-12)'}), '(iv_u, res3.fittedvalues + ci_half, rtol=1e-12)\n', (2074, 2121), True, 'import numpy as np\n'), ((2126, 2199), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res3.fittedvalues - ci_half)'], {'rtol': '(1e-12)'}), '(iv_l, res3.fittedvalues - ci_half, rtol=1e-12)\n', (2152, 2199), True, 'import numpy as np\n'), ((2254, 2303), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-1:, :]'], {'weights': '(3.0)'}), '(res3, x2[-1:, :], weights=3.0)\n', (2272, 2303), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2306, 2347), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-1]'], {}), '(prstd, prstd[-1])\n', (2329, 2347), True, 'import numpy as np\n'), ((2372, 2420), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-1, :]'], {'weights': '(3.0)'}), '(res3, x2[-1, :], weights=3.0)\n', (2390, 2420), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2423, 2464), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-1]'], {}), '(prstd, prstd[-1])\n', (2446, 2464), True, 'import numpy as np\n'), ((2490, 2539), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-2:, :]'], {'weights': '(3.0)'}), '(res3, x2[-2:, :], weights=3.0)\n', (2508, 2539), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2542, 2584), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-2:]'], {}), '(prstd, prstd[-2:])\n', (2565, 2584), True, 'import numpy as np\n'), ((2610, 2662), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-2:, :]'], {'weights': '[3, 3]'}), '(res3, x2[-2:, :], weights=[3, 3])\n', (2628, 2662), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2666, 2708), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-2:]'], {}), '(prstd, prstd[-2:])\n', (2689, 2708), True, 'import numpy as np\n'), ((2734, 2769), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[:3, :]'], {}), '(res3, x2[:3, :])\n', (2752, 2769), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2773, 2814), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[:3]'], {}), '(prstd, prstd[:3])\n', (2796, 2814), True, 'import numpy as np\n'), ((2819, 2905), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res3.fittedvalues[:3] + ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_u, res3.fittedvalues[:3] + ci_half[:3], rtol=\n 1e-12)\n', (2845, 2905), True, 'import numpy as np\n'), ((2936, 3022), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res3.fittedvalues[:3] - ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_l, res3.fittedvalues[:3] - ci_half[:3], rtol=\n 1e-12)\n', (2962, 3022), True, 'import numpy as np\n'), ((3156, 3246), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError', 'wls_prediction_std', 'res3', 'x2[-1, 0]'], {'weights': '(3.0)'}), '(ValueError, wls_prediction_std, res3, x2[-1, 0],\n weights=3.0)\n', (3180, 3246), True, 'import numpy as np\n'), ((3369, 3391), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(5)'], {}), '(0.5, 3, 5)\n', (3380, 3391), True, 'import numpy as np\n'), ((3475, 3536), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sew', '(sew1 + res3.scale * (wv - 1))'], {}), '(sew, sew1 + res3.scale * (wv - 1))\n', (3501, 3536), True, 'import numpy as np\n'), ((486, 502), 'numpy.ones', 'np.ones', (['nsample'], {}), '(nsample)\n', (493, 502), True, 'import numpy as np\n'), ((701, 731), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'nsample'}), '(size=nsample)\n', (717, 731), True, 'import numpy as np\n'), ((783, 794), 'statsmodels.regression.linear_model.OLS', 'OLS', (['y2', 'x2'], {}), '(y2, x2)\n', (786, 794), False, 'from statsmodels.regression.linear_model import OLS, WLS\n'), ((1670, 1690), 'statsmodels.regression.linear_model.WLS', 'WLS', (['y2', 'x2', '(1.0 / w)'], {}), '(y2, x2, 1.0 / w)\n', (1673, 1690), False, 'from statsmodels.regression.linear_model import OLS, WLS\n'), ((3313, 3349), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-3:, :]'], {}), '(res3, x2[-3:, :])\n', (3331, 3349), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((3408, 3462), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-3:, :]'], {'weights': '(1.0 / wv)'}), '(res3, x2[-3:, :], weights=1.0 / wv)\n', (3426, 3462), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((892, 910), 'numpy.dot', 'np.dot', (['covb', 'x2.T'], {}), '(covb, x2.T)\n', (898, 910), True, 'import numpy as np\n'), ((1791, 1809), 'numpy.dot', 'np.dot', (['covb', 'x2.T'], {}), '(covb, x2.T)\n', (1797, 1809), True, 'import numpy as np\n')] |
import logging
import jsonpointer
import yadage.handlers.utils as utils
from yadage.handlers.expression_handlers import handlers as exprhandlers
log = logging.getLogger(__name__)
handlers, predicate = utils.handler_decorator()
def checkmeta(flowview, metainfo):
log.debug('checking meta %s on view with offset %s',
metainfo, flowview.offset)
applied_ids = [rl.identifier for rl in flowview.applied_rules]
rulesok = all([x in applied_ids for x in metainfo['stages']])
stepsok = all([flowview.dag.getNode(x).has_result()
for x in metainfo['steps']])
log.debug('all rules applied: %s, all steps have results: %s',
rulesok, stepsok)
return (rulesok and stepsok)
def scope_done(scope, flowview):
'''
walks recursively all scopes starting at some initial scope to determine if
all steps and stages under this scope have been executed / applied. Will indicate
that it's safe to reference any result of the workflow within that scope.
'''
log.debug('checking scope %s on view with offset %s',
scope, flowview.offset)
result = True
bookkeeper = jsonpointer.JsonPointer(scope).resolve(flowview.bookkeeper)
for k, v in bookkeeper.items():
for k, v in bookkeeper.items():
if k == '_meta':
result = result and checkmeta(flowview, v)
else:
childscope = scope + '/{}'.format(k)
result = result and scope_done(childscope, flowview)
return result
@predicate('jsonpath_ready')
def jsonpath_ready(stage, depspec,stagespec):
'''
the main predicate for yadage. for a list of jsonpath expressions
determine whether the stage or workflow scope is ready (i.e. has a result)
'''
log.debug('checking jsonpath ready predicate\n%s', depspec)
dependencies = depspec['expressions']
for x in dependencies:
depmatches = stage.view.query(x, stage.view.steps)
if not depmatches:
log.debug('no query matches, not ready')
return False
issubwork = '_nodeid' not in depmatches[0].value[0]
if issubwork:
log.debug('dependency is a subworkflow. determine if scope is done')
if not all([scope_done(scope['_offset'], stage.view) for match in depmatches for scope in match.value]):
return False
else:
if not all([x.has_result() for x in stage.view.getSteps(x)]):
return False
log.debug('all checks ok, predicate is True')
return True
@predicate('expressions_fulfilled')
def expressions_fulfilled(stage, depspec,stagespec):
'''
the main predicate for yadage. for a list of jsonpath expressions
determine whether the stage or workflow scope is ready (i.e. has a result)
'''
log.debug('checking jsonpath ready predicate\n%s', depspec)
expressions = depspec['expressions']
for expression in expressions:
handler = exprhandlers[expression['expression_type']]
value = handler(stage.view, expression)
if not value:
return False
return True
| [
"logging.getLogger",
"yadage.handlers.utils.handler_decorator",
"jsonpointer.JsonPointer"
] | [((154, 181), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'import logging\n'), ((205, 230), 'yadage.handlers.utils.handler_decorator', 'utils.handler_decorator', ([], {}), '()\n', (228, 230), True, 'import yadage.handlers.utils as utils\n'), ((1163, 1193), 'jsonpointer.JsonPointer', 'jsonpointer.JsonPointer', (['scope'], {}), '(scope)\n', (1186, 1193), False, 'import jsonpointer\n')] |
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local").appName('ReadParquet').config("spark.driver.host", "localhost").config(
"spark.ui.port", "4040").getOrCreate()
peopleDF = spark.read.json("people.json")
# DataFrames can be saved as Parquet files, maintaining the schema information.
peopleDF.write.format("parquet").mode("overwrite").save("people.parquet")
# Read in the Parquet file created above.
# Parquet files are self-describing so the schema is preserved.
# The result of loading a parquet file is also a DataFrame.
parquetFile = spark.read.parquet("people.parquet")
# Parquet files can also be used to create a temporary view and then used in SQL statements.
parquetFile.createOrReplaceTempView("parquetFile")
teenagers = spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
teenagers.show()
# spark.stop()
| [
"pyspark.sql.SparkSession.builder.master"
] | [((46, 82), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""local"""'], {}), "('local')\n", (73, 82), False, 'from pyspark.sql import SparkSession\n')] |
import os
from enum import Enum
import requests
from docker import ContextAPI
from docker.transport import UnixHTTPAdapter
from compose.const import IS_WINDOWS_PLATFORM
if IS_WINDOWS_PLATFORM:
from docker.transport import NpipeHTTPAdapter
class Status(Enum):
SUCCESS = "success"
FAILURE = "failure"
CANCELED = "canceled"
class MetricsSource:
CLI = "docker-compose"
if IS_WINDOWS_PLATFORM:
METRICS_SOCKET_FILE = 'npipe://\\\\.\\pipe\\docker_cli'
else:
METRICS_SOCKET_FILE = 'http+unix:///var/run/docker-cli.sock'
class MetricsCommand(requests.Session):
"""
Representation of a command in the metrics.
"""
def __init__(self, command,
context_type=None, status=Status.SUCCESS,
source=MetricsSource.CLI, uri=None):
super().__init__()
self.command = ("compose " + command).strip() if command else "compose --help"
self.context = context_type or ContextAPI.get_current_context().context_type or 'moby'
self.source = source
self.status = status.value
self.uri = uri or os.environ.get("METRICS_SOCKET_FILE", METRICS_SOCKET_FILE)
if IS_WINDOWS_PLATFORM:
self.mount("http+unix://", NpipeHTTPAdapter(self.uri))
else:
self.mount("http+unix://", UnixHTTPAdapter(self.uri))
def send_metrics(self):
try:
return self.post("http+unix://localhost/usage",
json=self.to_map(),
timeout=.05,
headers={'Content-Type': 'application/json'})
except Exception as e:
return e
def to_map(self):
return {
'command': self.command,
'context': self.context,
'source': self.source,
'status': self.status,
}
| [
"docker.ContextAPI.get_current_context",
"os.environ.get",
"docker.transport.UnixHTTPAdapter",
"docker.transport.NpipeHTTPAdapter"
] | [((1100, 1158), 'os.environ.get', 'os.environ.get', (['"""METRICS_SOCKET_FILE"""', 'METRICS_SOCKET_FILE'], {}), "('METRICS_SOCKET_FILE', METRICS_SOCKET_FILE)\n", (1114, 1158), False, 'import os\n'), ((954, 986), 'docker.ContextAPI.get_current_context', 'ContextAPI.get_current_context', ([], {}), '()\n', (984, 986), False, 'from docker import ContextAPI\n'), ((1230, 1256), 'docker.transport.NpipeHTTPAdapter', 'NpipeHTTPAdapter', (['self.uri'], {}), '(self.uri)\n', (1246, 1256), False, 'from docker.transport import NpipeHTTPAdapter\n'), ((1311, 1336), 'docker.transport.UnixHTTPAdapter', 'UnixHTTPAdapter', (['self.uri'], {}), '(self.uri)\n', (1326, 1336), False, 'from docker.transport import UnixHTTPAdapter\n')] |
# -*- coding: UTF-8 -*-
import time
import simplejson as json
from MySQLdb.connections import numeric_part
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
from common.utils.extend_json_encoder import ExtendJSONEncoder
from common.utils.const import SQLTuning
from sql.utils.dao import Dao
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword, DML
@permission_required('sql.optimize_sqltuning', raise_exception=True)
def tuning(request):
instance_name = request.POST.get('instance_name')
db_name = request.POST.get('db_name')
sqltext = request.POST.get('sql_content')
option = request.POST.getlist('option[]')
sql_tunning = SqlTuning(instance_name=instance_name, db_name=db_name, sqltext=sqltext)
result = {'status': 0, 'msg': 'ok', 'data': {}}
if 'sys_parm' in option:
basic_information = sql_tunning.basic_information()
sys_parameter = sql_tunning.sys_parameter()
optimizer_switch = sql_tunning.optimizer_switch()
result['data']['basic_information'] = basic_information
result['data']['sys_parameter'] = sys_parameter
result['data']['optimizer_switch'] = optimizer_switch
if 'sql_plan' in option:
plan, optimizer_rewrite_sql = sql_tunning.sqlplan()
result['data']['optimizer_rewrite_sql'] = optimizer_rewrite_sql
result['data']['plan'] = plan
if 'obj_stat' in option:
object_statistics_tableistructure, object_statistics_tableinfo, object_statistics_indexinfo = sql_tunning.object_statistics()
result['data']['object_statistics_tableistructure'] = object_statistics_tableistructure
result['data']['object_statistics_tableinfo'] = object_statistics_tableinfo
result['data']['object_statistics_indexinfo'] = object_statistics_indexinfo
if 'sql_profile' in option:
session_status = sql_tunning.exec_sql()
result['data']['session_status'] = session_status
# 关闭连接
sql_tunning.dao.close()
result['data']['sqltext'] = sqltext
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
class SqlTuning(object):
def __init__(self, instance_name, db_name, sqltext):
self.dao = Dao(instance_name=instance_name, flag=True)
self.db_name = db_name
self.sqltext = sqltext
self.sql_variable = '''
select
lower(variable_name),
variable_value
from performance_schema.global_variables
where upper(variable_name) in ('%s')
order by variable_name;''' % ('\',\''.join(SQLTuning.SYS_PARM_FILTER))
self.sql_optimizer_switch = '''
select variable_value
from performance_schema.global_variables
where upper(variable_name) = 'OPTIMIZER_SWITCH';
'''
self.sql_table_info = '''
select
table_name,
engine,
row_format as format,
table_rows,
avg_row_length as avg_row,
round((data_length + index_length) / 1024 / 1024, 2) as total_mb,
round((data_length) / 1024 / 1024, 2) as data_mb,
round((index_length) / 1024 / 1024, 2) as index_mb
from information_schema.tables
where table_schema = '%s' and table_name = '%s'
'''
self.sql_table_index = '''
select
table_name,
index_name,
non_unique,
seq_in_index,
column_name,
collation,
cardinality,
nullable,
index_type
from information_schema.statistics
where table_schema = '%s' and table_name = '%s'
order by 1, 3;
'''
@staticmethod
def __is_subselect(parsed):
if not parsed.is_group:
return False
for item in parsed.tokens:
if item.ttype is DML and item.value.upper() == 'SELECT':
return True
return False
def __extract_from_part(self, parsed):
from_seen = False
for item in parsed.tokens:
# print item.ttype,item.value
if from_seen:
if self.__is_subselect(item):
for x in self.__extract_from_part(item):
yield x
elif item.ttype is Keyword:
raise StopIteration
else:
yield item
elif item.ttype is Keyword and item.value.upper() == 'FROM':
from_seen = True
@staticmethod
def __extract_table_identifiers(token_stream):
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
yield identifier.get_real_name()
elif isinstance(item, Identifier):
yield item.get_real_name()
# It's a bug to check for Keyword here, but in the example
# above some tables names are identified as keywords...
elif item.ttype is Keyword:
yield item.value
def __extract_tables(self, p_sqltext):
stream = self.__extract_from_part(sqlparse.parse(p_sqltext)[0])
return list(self.__extract_table_identifiers(stream))
def basic_information(self):
return self.dao.mysql_query(sql="select @@version")
def sys_parameter(self):
# 获取mysql版本信息
version = self.basic_information()['rows'][0][0]
server_version = tuple([numeric_part(n) for n in version.split('.')[:2]])
if server_version < (5, 7):
sql = self.sql_variable.replace('performance_schema', 'information_schema')
else:
sql = self.sql_variable
return self.dao.mysql_query(sql=sql)
def optimizer_switch(self):
# 获取mysql版本信息
version = self.basic_information()['rows'][0][0]
server_version = tuple([numeric_part(n) for n in version.split('.')[:2]])
if server_version < (5, 7):
sql = self.sql_optimizer_switch.replace('performance_schema', 'information_schema')
else:
sql = self.sql_optimizer_switch
return self.dao.mysql_query(sql=sql)
def sqlplan(self):
plan = self.dao.mysql_query(self.db_name, "explain extended " + self.sqltext)
optimizer_rewrite_sql = self.dao.mysql_query(sql="show warnings")
return plan, optimizer_rewrite_sql
# 获取关联表信息存在缺陷,只能获取到一张表
def object_statistics(self):
tableistructure = {'column_list': [], 'rows': []}
tableinfo = {'column_list': [], 'rows': []}
indexinfo = {'column_list': [], 'rows': []}
for index, table_name in enumerate(self.__extract_tables(self.sqltext)):
tableistructure = self.dao.mysql_query(db_name=self.db_name, sql="show create table {};".format(
table_name.replace('`', '').lower()))
tableinfo = self.dao.mysql_query(
sql=self.sql_table_info % (self.db_name, table_name.replace('`', '').lower()))
indexinfo = self.dao.mysql_query(
sql=self.sql_table_index % (self.db_name, table_name.replace('`', '').lower()))
return tableistructure, tableinfo, indexinfo
def exec_sql(self):
result = {"EXECUTE_TIME": 0,
"BEFORE_STATUS": {'column_list': [], 'rows': []},
"AFTER_STATUS": {'column_list': [], 'rows': []},
"SESSION_STATUS(DIFFERENT)": {'column_list': ['status_name', 'before', 'after', 'diff'], 'rows': []},
"PROFILING_DETAIL": {'column_list': [], 'rows': []},
"PROFILING_SUMMARY": {'column_list': [], 'rows': []}
}
sql_profiling = "select concat(upper(left(variable_name,1)),substring(lower(variable_name),2,(length(variable_name)-1))) var_name,variable_value var_value from performance_schema.session_status order by 1"
# 获取mysql版本信息
version = self.basic_information()['rows'][0][0]
server_version = tuple([numeric_part(n) for n in version.split('.')[:2]])
if server_version < (5, 7):
sql = sql_profiling.replace('performance_schema', 'information_schema')
else:
sql = sql_profiling
self.dao.mysql_query(sql="set profiling=1")
records = self.dao.mysql_query(sql="select ifnull(max(query_id),0) from INFORMATION_SCHEMA.PROFILING")
query_id = records['rows'][0][0] + 3 # skip next sql
# 获取执行前信息
result['BEFORE_STATUS'] = self.dao.mysql_query(sql=sql)
# 执行查询语句,统计执行时间
t_start = time.time()
self.dao.mysql_query(sql=self.sqltext)
t_end = time.time()
cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
result['EXECUTE_TIME'] = cost_time
# 获取执行后信息
result['AFTER_STATUS'] = self.dao.mysql_query(sql=sql)
# 获取PROFILING_DETAIL信息
result['PROFILING_DETAIL'] = self.dao.mysql_query(
sql="select STATE,DURATION,CPU_USER,CPU_SYSTEM,BLOCK_OPS_IN,BLOCK_OPS_OUT ,MESSAGES_SENT ,MESSAGES_RECEIVED ,PAGE_FAULTS_MAJOR ,PAGE_FAULTS_MINOR ,SWAPS from INFORMATION_SCHEMA.PROFILING where query_id=" + str(
query_id) + " order by seq")
result['PROFILING_SUMMARY'] = self.dao.mysql_query(
sql="SELECT STATE,SUM(DURATION) AS Total_R,ROUND(100*SUM(DURATION)/(SELECT SUM(DURATION) FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
query_id) + "),2) AS Pct_R,COUNT(*) AS Calls,SUM(DURATION)/COUNT(*) AS R_Call FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
query_id) + " GROUP BY STATE ORDER BY Total_R DESC")
# 处理执行前后对比信息
before_status_rows = [list(item) for item in result['BEFORE_STATUS']['rows']]
after_status_rows = [list(item) for item in result['AFTER_STATUS']['rows']]
for index, item in enumerate(before_status_rows):
if before_status_rows[index][1] != after_status_rows[index][1]:
before_status_rows[index].append(after_status_rows[index][1])
before_status_rows[index].append(
str(float(after_status_rows[index][1]) - float(before_status_rows[index][1])))
diff_rows = [item for item in before_status_rows if len(item) == 4]
result['SESSION_STATUS(DIFFERENT)']['rows'] = diff_rows
return result
| [
"simplejson.dumps",
"django.contrib.auth.decorators.permission_required",
"MySQLdb.connections.numeric_part",
"time.time",
"sql.utils.dao.Dao",
"sqlparse.parse"
] | [((456, 523), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.optimize_sqltuning"""'], {'raise_exception': '(True)'}), "('sql.optimize_sqltuning', raise_exception=True)\n", (475, 523), False, 'from django.contrib.auth.decorators import permission_required\n'), ((2125, 2189), 'simplejson.dumps', 'json.dumps', (['result'], {'cls': 'ExtendJSONEncoder', 'bigint_as_string': '(True)'}), '(result, cls=ExtendJSONEncoder, bigint_as_string=True)\n', (2135, 2189), True, 'import simplejson as json\n'), ((2351, 2394), 'sql.utils.dao.Dao', 'Dao', ([], {'instance_name': 'instance_name', 'flag': '(True)'}), '(instance_name=instance_name, flag=True)\n', (2354, 2394), False, 'from sql.utils.dao import Dao\n'), ((8653, 8664), 'time.time', 'time.time', ([], {}), '()\n', (8662, 8664), False, 'import time\n'), ((8728, 8739), 'time.time', 'time.time', ([], {}), '()\n', (8737, 8739), False, 'import time\n'), ((5225, 5250), 'sqlparse.parse', 'sqlparse.parse', (['p_sqltext'], {}), '(p_sqltext)\n', (5239, 5250), False, 'import sqlparse\n'), ((5552, 5567), 'MySQLdb.connections.numeric_part', 'numeric_part', (['n'], {}), '(n)\n', (5564, 5567), False, 'from MySQLdb.connections import numeric_part\n'), ((5965, 5980), 'MySQLdb.connections.numeric_part', 'numeric_part', (['n'], {}), '(n)\n', (5977, 5980), False, 'from MySQLdb.connections import numeric_part\n'), ((8087, 8102), 'MySQLdb.connections.numeric_part', 'numeric_part', (['n'], {}), '(n)\n', (8099, 8102), False, 'from MySQLdb.connections import numeric_part\n')] |
from django.contrib import admin
from .models import Item
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
fieldsets = [
('Item', {
'fields': [
'name',
'stock',
'description',
'thumbnail'
]
}),
('Meta', {
'fields': [
'views',
]
}),
]
search_fields = [
'name',
]
| [
"django.contrib.admin.register"
] | [((60, 80), 'django.contrib.admin.register', 'admin.register', (['Item'], {}), '(Item)\n', (74, 80), False, 'from django.contrib import admin\n')] |
#!/usr/bin/python3
import sys
import os, os.path
import platform
import shutil
import time
import re
import difflib
import pickle
from subprocess import run, PIPE
from colorama import init, Fore, Back, Style
from statistics import median
# Globals
if platform.system() in ['Linux', 'Darwin']:
SYNQUID_CMD = ['stack', 'exec', '--', 'resyn'] # Command to call Resyn
TIMEOUT_CMD = ['timeout'] # Timeout command
TIMEOUT = ['300'] # Timeout value (seconds)
else:
SYNQUID_CMD = ['Resyn.exe']
TIMEOUT_CMD = ['']
TIMEOUT = ['']
LOGFILE = 'results.log' # Log file
MICRO_LOGFILE = 'micro.log' # Log file
DUMPFILE = 'results' # Result serialization file
MICRO_DUMPFILE = 'micro' # you know
CSV_FILE = 'results.csv' # CSV-output file
MICRO_CSV_FILE = 'micro.csv' # CSV-output file (micro benchmarks)
LATEX_FILE = 'results.tex' # Latex-output file
MICRO_LATEX_FILE = 'micro.tex' # Latex-output file (micro benchmarks)
ORACLE_FILE = 'solutions' # Solutions file
MICRO_ORACLE_FILE = 'micro_solutions' # you know
COMMON_OPTS = ['--print-stats'] # Options to use for all benchmarks
RESOURCE_OPTS = []
RESOURCES_OFF_OPT = ['-r=false'] # Option to disable resource analysis
FNULL = open(os.devnull, 'w') # Null file
PAPER_PATH = '/home/tristan/Research/resource-paper/'
class Benchmark:
def __init__(self, name, description, components='', options=[], np = '-'):
self.name = name # Id
self.description = description # Description (in the table)
self.components = components # Description of components used (in the table)
self.options = options # Command-line options to use for this benchmark when running in individual context
self.num_programs = np # Number of programs generated in the enumerate-and-check process
def str(self):
return self.name + ': ' + self.description + ' ' + str(self.options)
# Micro benchmark
class MBenchmark:
def __init__(self, name, description, signature='', components='', options=[], complexity='', complexity_nr='', eac=-1, incremental=-1):
self.name = name # file to test
self.description = description # Description (in the table)
self.signature = signature # Type signature
self.complexity = complexity
self.complexity_nr = complexity_nr
self.components = components # Description of components used (in the table)
self.options = options # Command-line options to use for this benchmark when running in individual context
self.eac = eac
self.incremental = incremental
def str(self):
return self.name + ': ' + self.description + ' ' + str(self.options)
class BenchmarkGroup:
def __init__(self, name, default_options, benchmarks):
self.name = name # Id
self.default_options = default_options # Command-line options to use for all benchmarks in this group when running in common context
self.benchmarks = benchmarks # List of benchmarks in this group
INSERT_TYPE = '$\\forall\\alpha .\
\\tarrow{x}{\\alpha}\
{\\tarrow{xs}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tilist{\\alpha}}{\T{elems} \ \\nu = [x] \\cup \T{elems} \ xs}}}$'
INSERT_FG_TYPE = '$\\forall\\alpha .\
\\tarrow{x}{\\alpha}\
{\\tarrow{xs}{\\tilist{\\tpot{\\alpha}{\\mathsf{ite}(x > \\nu, 1, 0)}}}\
{\\tsubset{\\tilist{\\alpha}}{\T{elems} \ \\nu = [x] \\cup \T{elems} \ xs}}}$'
INSERT_MEASURE_TYPE = '$\\forall\\alpha .\
\\tarrow{x}{\\alpha}\
{\\tarrow{xs}{ \\tpot{ \\tilist{ \\alpha }}{\\mathsf{numgt}(x,\\nu)} }\
{\\tsubset{\\tilist{\\alpha}}{\T{elems} \ \\nu = [x] \\cup \T{elems} \ xs}}}$'
LEN_COMPARE_TYPE = '$\\forall\\alpha .\
\\tarrow{ys}{\\tlist{\\tpot{\\alpha}{1}}}\
{\\tarrow{zs}{\\tlist{\\alpha}}{\\tsubset{\\tbool}{\\nu = ( \T{len} \ ys = \T{len} \ zs )}}} $'
REPLICATE_TYPE = '$\\forall\\alpha .\
\\tarrow{n}{\T{Nat}}\
{\\tarrow{x}{n \\times \\tpot{\\alpha}{n}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \ \\nu = n}}$'
INTERSECT_TYPE = '$\\forall\\alpha .\
\\tarrow{ys}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tarrow{zs}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{elems} \ \\nu = \T{elems} \ ys \\cap \T{elems} \ zs}}}$'
RANGE_TYPE = '$\\tarrow{lo}{\T{Int}}\
{\\tarrow{hi}{\\tsubset{\\tpot{\T{Int}}{\\nu - lo}}{\\nu \geq lo}}\
{\\tsubset{\\tilist{\\tsubset{\T{Int}}{lo \leq \\nu \leq hi}}}{\T{len} \\nu = hi - lo}}}\
{} $'
COMPRESS_TYPE = '$\\forall \\alpha .\
\\tarrow{xs}{\\tlist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tclist{\\alpha}}{\T{elems} \ xs = \T{elems} \ \\nu}}$'
TRIPLE_TYPE = '$\\forall \\alpha .\
\\tarrow{xs}{\\tlist{\\tpot{\\alpha}{2}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \ \\nu = \T{len} \ xs + \T{len} \ xs + \T{len} \ xs }}$'
TRIPLE_TYPE = '$\\forall \\alpha .\
\\tarrow{xs}{\\tlist{\\tpot{\\alpha}{2}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \ \\nu = \T{len} \ xs + \T{len} \ xs + \T{len} \ xs }}$'
CONCAT_TYPE = '$\\forall\\alpha .\
\\tarrow{xxs}{\\tlist{\\tlist{\\tpot{\\alpha}{1}}}}\
{\\tarrow{acc}{\\tlist{\\alpha}}\
{\\tsubset{\\tlist{\\alpha}}{\T{sumLen} \ xs = \T{len} \\nu}}}$'
DIFF_TYPE = '$\\forall\\alpha .\
\\tarrow{ys}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tarrow{zs}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{elems} \ \\nu = \T{elems} \ ys - \T{elems} \ zs}}}$'
UNION_TYPE = '$\\forall\\alpha .\
\\tarrow{xs}{\\tlist{\\alpha}}\
{\\tarrow{ys}{\\tpot{\\tlist{\\alpha}}{\\mathsf{min}(\T{len} \ xs, \T{len} \ ys)}}\
{\\tsubset{\\tlist{\\alpha}}{\\T{elems} \\nu = \\T{elems} \ xs \\cup \\T{elems} \ ys}}}$'
TAKE_TYPE = '$\\forall\\alpha .\
\\tarrow{n}{\T{Nat}}\
{\\tarrow{xs}{\\tpot{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu \\geq n}}{n}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu = n}}}$'
DROP_TYPE = '$\\forall\\alpha .\
\\tarrow{n}{\T{Nat}}\
{\\tarrow{xs}{\\tpot{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu \\geq n}}{n}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu = \T{len} xs - n}}}$'
MICRO_BENCHMARKS = [
MBenchmark('List-Triple1', 'triple', TRIPLE_TYPE, 'append', ['--multiplicities=false'], '$\mid xs \mid$', '$\mid xs \mid$', 1),
MBenchmark('List-Triple2', 'triple\'', TRIPLE_TYPE, 'append\'', ['--multiplicities=false'], '$\mid xs \mid$', '$\mid xs \mid^2$', 1),
MBenchmark('List-Concat', 'concat list of lists', CONCAT_TYPE, 'append', [], '$\mid xxs \mid$', '$\mid xxs \mid^2$',1),
MBenchmark('List-Compress', 'compress', COMPRESS_TYPE, '$=$,$\\neq$', [], '$\mid xs \mid$', '$2^{ \mid xs \mid }$',1),
MBenchmark('List-Intersect', 'common', INTERSECT_TYPE, '$<$, member', ['-f=AllArguments', '-a=2', '--backtrack'], '$\mid ys \mid + \mid zs \mid$', '$\mid ys \mid \mid zs \mid$', 1),
MBenchmark('List-Diff', 'list difference', DIFF_TYPE, '$<$, member', ['-f=AllArguments', '--backtrack'], '$\mid ys \mid + \mid zs \mid$', '$\mid ys \mid \mid zs \mid$',1),
MBenchmark('List-Insert', 'insert', INSERT_TYPE , '$<$', ['--backtrack'], '$\mid xs \mid$', '$\mid xs \mid$'),
MBenchmark('List-Insert-Fine', 'insert\'', INSERT_MEASURE_TYPE, '$<$', ['-a=2', '--backtrack'], '$\T{numgt}(x,xs)$', '$\mid xs \mid$',-1,1),
MBenchmark('List-Insert-Fine-Alt', 'insert\'\'', INSERT_FG_TYPE, '$<$', [], '$\T{numgt}(x,xs)$', '$\mid xs \mid$',-1,1),
MBenchmark('List-Replicate', 'replicate', REPLICATE_TYPE, 'zero, inc, dec', [], '$n$', '$n$',-1,1),
MBenchmark('List-Take', 'take', TAKE_TYPE, 'zero, inc, dec', [], '$n$', '$n$',-1,1),
MBenchmark('List-Drop', 'drop', DROP_TYPE, 'zero, inc, dec', [], '$n$', '$n$',-1,1),
MBenchmark('List-Range', 'range', RANGE_TYPE, 'inc,dec,$\geq$', ['-f=Nonterminating'], '$hi - lo$', '-',-1,1),
#MBenchmark('List-Union', 'union', UNION_TYPE, 'min, $\leq$', ['--explicit-match'], '$min(\mid xs \mid, \mid ys \mid )$', '$\mid xs \mid$',1,1),
MBenchmark('List-InsertCT', 'CT insert', INSERT_TYPE, '$<$', ['--ct', '--backtrack', '-a=2'], '$\mid xs \mid$', '$\mid xs \mid$', 1),
MBenchmark('List-LenCompareCT', 'CT compare', LEN_COMPARE_TYPE, 'true, false, and', ['-f=AllArguments', '-a=2', '--ct'], '$\mid ys \mid$', '$\mid ys \mid$', 1),
MBenchmark('List-LenCompare', 'compare', LEN_COMPARE_TYPE, 'true, false, and', ['-f=AllArguments', '-a=2'], '$\mid ys \mid$', '$\mid ys \mid$'),
#MBenchmark('List-Union', 'union', ''),
#MBenchmark('List-Pairs', 'ordered pairs', 'append, attach' ),
]
ALL_BENCHMARKS = [
BenchmarkGroup("List", [], [
Benchmark('List-Null', 'is empty', 'true, false'),
Benchmark('List-Elem', 'member', 'true, false, $=$, $\\neq$'),
Benchmark('List-Stutter', 'duplicate each element'),
Benchmark('List-Replicate', 'replicate', '0, inc, dec, $\\leq$, $\\neq$'),
Benchmark('List-Append', 'append two lists', ''),
Benchmark('List-Take', 'take first $n$ elements', '0, inc, dec, $\\leq$, $\\neq$', ['--cegis-max=50']),
Benchmark('List-Drop', 'drop first $n$ elements', '0, inc, dec, $\\leq$, $\\neq$', ['--cegis-max=50']),
Benchmark('List-Concat', 'concat list of lists', 'append'),
Benchmark('List-Delete', 'delete value', '$=$, $\\neq$'),
Benchmark('List-Zip', 'zip'),
Benchmark('List-ZipWith', 'zip with'),
Benchmark('List-Ith', '$i$-th element', '0, inc, dec, $\\leq$, $\\neq$'),
Benchmark('List-ElemIndex', 'index of element', '0, inc, dec, $=$, $\\neq$'),
Benchmark('List-Snoc', 'insert at end'),
Benchmark('List-Split', 'balanced split', 'fst, snd, abs', ['-m=3']),
Benchmark('List-Reverse', 'reverse', 'insert at end'),
Benchmark('IncList-Insert', 'insert (sorted)', '$\\leq$, $\\neq$'),
Benchmark('List-ExtractMin', 'extract minimum', '$\\leq$, $\\neq$', ['-a=2', '-m=3']),
#Benchmark('List-Range', 'range', 'inc,dec,$\geq$'),
Benchmark('List-Foldr', 'foldr'),
Benchmark('List-Fold-Length', 'length using fold', '0, inc, dec', ['-m=0']),
Benchmark('List-Fold-Append', 'append using fold', '', ['-m=0']),
Benchmark('List-Map', 'map'),
#Benchmark('List-Split', 'split list', '', ['-m=3'])
# Try it by hand!
#Benchmark('TripleList-Intersect', 'three-way intersection', '$<$, member', ['-f=AllArguments', '-m=3'])
]),
BenchmarkGroup("Unique list", [], [
Benchmark('UniqueList-Insert', 'insert', '$=$, $\\neq$'),
Benchmark('UniqueList-Delete', 'delete', '$=$, $\\neq$'),
#Benchmark('List-Nub', 'remove duplicates', 'member', []),
Benchmark('List-Compress', 'compress', '$=$, $\\neq$', np = 3),
Benchmark('UniqueList-Range', 'integer range', '0, inc, dec, $\\leq$, $\\neq$'),
Benchmark('List-Partition', 'partition', '$\\leq$'),
#Benchmark('IncList-Pivot', 'append with pivot'),
]),
BenchmarkGroup("Sorted list", ['-f=AllArguments'], [
Benchmark('StrictIncList-Insert', 'insert', '$<$'),
Benchmark('StrictIncList-Delete', 'delete', '$<$'),
#Benchmark('List-Diff', 'difference', 'member, $<$', ['--backtrack', '-f=AllArguments']),
#Benchmark('TripleList-Intersect', 'three-way intersection', '$<$, member',['-f=AllArguments','--backtrack','-m=3'])
Benchmark('StrictIncList-Intersect', 'intersect', '$<$', ['-f=AllArguments', '--backtrack']),
]),
BenchmarkGroup("Tree", [], [
Benchmark('Tree-Count', 'node count', '0, 1, +'),
Benchmark('Tree-Flatten', 'preorder', 'append'),
Benchmark('Tree-ToList', 'to list', 'append'),
Benchmark('Tree-Elem', 'member', 'false, not, or, $=$', ['--multiplicities=false'] )
#Benchmark('Tree-BalancedReplicate', 'create balanced', '0, inc, dec,
#$\\leq$, $\\neq$' )
#Benchmark('Tree-Count', 'size')
]),
BenchmarkGroup("BST", [], [
Benchmark('BST-Member', 'member', 'true, false, $\\leq$, $\\neq$'),
Benchmark('BST-Insert', 'insert', '$\\leq$, $\\neq$'),
Benchmark('BST-Delete', 'delete', '$\\leq$, $\\neq$'),
Benchmark('BST-Sort', 'BST sort', '$\\leq$, $\\neq$')
]),
BenchmarkGroup("AVL", ['-a=2'], [
Benchmark('AVL-RotateL', 'rotate left', 'inc', ['-a 2', '-u']),
Benchmark('AVL-RotateR', 'rotate right', 'inc', ['-a 2', '-u']),
Benchmark('AVL-Balance', 'balance', 'rotate, nodeHeight, isSkewed, isLHeavy, isRHeavy', ['-a 2', '-e']),
Benchmark('AVL-Insert', 'insert', 'balance, $<$', ['-a 2']),
Benchmark('AVL-ExtractMin', 'extract minimum', '$<$', ['-a 2']),
Benchmark('AVL-Delete', 'delete', 'extract minimum, balance, $<$', ['-a 2', '-m 1']),
]),
BenchmarkGroup("RBT", ['-m=1', '-a=2'], [
Benchmark('RBT-BalanceL', 'balance left', '', ['-m=1', '-a=2']),
Benchmark('RBT-BalanceR', 'balance right', '', ['-m=1', '-a=2']),
Benchmark('RBT-Insert', 'insert', 'balance left, right, $\\leq$, $\\neq$', ['-m=1', '-a=2'])
]),
BenchmarkGroup("User", [], [
Benchmark('Evaluator', 'desugar AST', '0, 1, 2'),
Benchmark('AddressBook-Make', 'make address book', 'is private', ['-a=2']),
#Benchmark('AddressBook-Merge', 'merge address books', 'append', ['-a=2'])
]),
BenchmarkGroup("Binary Heap", [], [
Benchmark('BinHeap-Insert', 'insert', '$\\leq$, $\\neq$'),
Benchmark('BinHeap-Member', 'member', 'false, not, or, $\leq$, $\\neq$', ['--multiplicities=false']),
Benchmark('BinHeap-Singleton', '1-element constructor', '$\\leq$, $\\neq$'),
Benchmark('BinHeap-Doubleton', '2-element constructor', '$\\leq$, $\\neq$'),
Benchmark('BinHeap-Tripleton', '3-element constructor', '$\\leq$, $\\neq$')
])
]
class SynthesisResult:
def __init__(self, name, time, goal_count, code_size, spec_size, measure_count, num_constraints):
self.name = name # Benchmark name
self.time = time # Synthesis time (seconds)
self.goal_count = goal_count # Number of synthesis goals
self.code_size = code_size # Cumulative synthesized code size (in AST nodes)
self.spec_size = spec_size # Cumulative specification size (in AST nodes)
self.measure_count = measure_count # Number of measures defined
self.optimized = False
self.nres_code_size = '-'
self.nres_time = -3.0
self.eac_time = -3.0
self.incremental_time = -3.0
self.pct_slowdown = 0.0
self.num_constraints = num_constraints
def str(self):
return self.name + ', ' + '{0:0.2f}'.format(self.time) + ', ' + self.goal_count + ', ' + self.code_size + ', ' + self.spec_size + ', ' + self.measure_count
def run_benchmark(name, opts, default_opts):
'''Run benchmark name with command-line options opts (use default_opts with running the common context variant); record results in the results dictionary'''
with open(LOGFILE, 'a+') as logfile:
start = time.time()
logfile.write(name + '\n')
logfile.seek(0, os.SEEK_END)
# Run Synquid on the benchmark:
synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS + opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
end = time.time()
print('{0:0.2f}'.format(end - start), end = ' ')
if synthesis_res.returncode: # Synthesis failed
print(Back.RED + Fore.RED + Style.BRIGHT + 'FAIL' + Style.RESET_ALL, end = ' ')
synthesis_output = ''
results [name] = SynthesisResult(name, (end - start), '-', '-', '-', '-', '-')
else: # Synthesis succeeded: code metrics from the output and record synthesis time
logfile.write(synthesis_res.stdout)
lastLines = synthesis_res.stdout.split('\n')[-6:]
synthesis_output = synthesis_res.stdout.split('\n')[:-6]
goal_count = re.match("\(Goals: (\d+)\).*$", lastLines[0]).group(1)
measure_count = re.match("\(Measures: (\d+)\).*$", lastLines[1]).group(1)
spec_size = re.match("\(Spec size: (\d+)\).*$", lastLines[2]).group(1)
solution_size = re.match("\(Solution size: (\d+)\).*$", lastLines[3]).group(1)
num_constraints = re.match("\(Number of resource constraints: (\d+)\).*$", lastLines[4]).group(1)
results [name] = SynthesisResult(name, (end - start), goal_count, solution_size, spec_size, measure_count, num_constraints)
print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OK' + Style.RESET_ALL, end = ' ')
variant_options = [ # Command-line options to use for each variant of Synquid
('nres', opts + RESOURCES_OFF_OPT),
]
# Run each variant: (now there's only one, should probably change this...)
for (variant_id, opts) in variant_options:
run_version(name, variant_id, opts, logfile, str(synthesis_output), results)
print()
def run_micro_benchmark(name, opts, default_opts, eac, incremental):
'''Run benchmark name with command-line options opts (use default_opts with running the common context variant); record results in the results dictionary'''
with open(MICRO_LOGFILE, 'a+') as logfile:
start = time.time()
logfile.write(name + '\n')
logfile.seek(0, os.SEEK_END)
# Run Synquid on the benchmark:
synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS + opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
end = time.time()
print('{0:0.2f}'.format(end - start), end = ' ')
if synthesis_res.returncode: # Synthesis failed
print(Back.RED + Fore.RED + Style.BRIGHT + 'FAIL' + Style.RESET_ALL, end = ' ')
synthesis_output = ''
micro_results [name] = SynthesisResult(name, (end - start), '-', '-', '-', '-', '-')
else: # Synthesis succeeded: code metrics from the output and record synthesis time
logfile.write(synthesis_res.stdout)
lastLines = synthesis_res.stdout.split('\n')[-6:]
synthesis_output = synthesis_res.stdout.split('\n')[:-6]
goal_count = re.match("\(Goals: (\d+)\).*$", lastLines[0]).group(1)
measure_count = re.match("\(Measures: (\d+)\).*$", lastLines[1]).group(1)
spec_size = re.match("\(Spec size: (\d+)\).*$", lastLines[2]).group(1)
solution_size = re.match("\(Solution size: (\d+)\).*$", lastLines[3]).group(1)
num_constraints = re.match("\(Number of resource constraints: (\d+)\).*$", lastLines[4]).group(1)
micro_results [name] = SynthesisResult(name, (end - start), goal_count, solution_size, spec_size, measure_count, num_constraints)
print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OK' + Style.RESET_ALL, end = ' ')
variant_options = [ # Command-line options to use for each variant of Synquid
('nres', opts + RESOURCES_OFF_OPT),
]
# Run each variant: (now there's only one, should probably change this...)
for (variant_id, opts) in variant_options:
run_version(name, variant_id, opts, logfile, str(synthesis_output), micro_results)
eac_opts = ['--eac', '--backtrack']
if eac < 0:
micro_results[name].eac_time = '{-}'
else:
run_micro_version(name, logfile, 'EAC', eac_opts, lambda t: set_eac_time(micro_results, name, t))
incremental_opts = ['--inc-cegis=false']
if incremental < 0:
micro_results[name].incremental_time = '{-}'
else:
run_micro_version(name, logfile, 'NONINCREMENTAL', incremental_opts, lambda t: set_inc_time(micro_results, name, t))
print()
def set_eac_time(res, name, t):
res[name].eac_time = t
def set_inc_time(res, name, t):
res[name].incremental_time = t
def run_version(name, variant_id, variant_opts, logfile, with_res, results_file):
'''Run benchmark name using command-line options variant_opts and record it as a Synquid variant variant_id in the results dictionary'''
start = time.time()
logfile.seek(0, os.SEEK_END)
# Run Synquid on the benchmark, mute output:
synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS +
variant_opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
end = time.time()
#results_file[name].eac_time = -1
print('{0:0.2f}'.format(end - start), end = ' ')
if synthesis_res.returncode == 124: # Timeout: record timeout
print(Back.RED + Fore.RED + Style.BRIGHT + 'TIMEOUT' + Style.RESET_ALL, end = ' ')
results_file[name].nres_time = -1
elif synthesis_res.returncode: # Synthesis failed: record failure
print(Back.RED + Fore.RED + Style.BRIGHT + 'FAIL' + Style.RESET_ALL, end = ' ')
results_file[name].nres_time = -2
else: # Synthesis succeeded: record time for variant
lastLines = synthesis_res.stdout.split('\n')[-6:]
solution_size = re.match("\(Solution size: (\d+)\).*$", lastLines[3]).group(1)
results_file[name].nres_time = (end - start)
pct_slower = results_file[name].time / (end - start)
results_file[name].pct_slowdown = pct_slower
without_res = synthesis_res.stdout.split('\n')[:-6]
# Compare outputs to see if resources led to any optimization
diff = difflib.unified_diff(with_res, str(without_res))
print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OK' + Style.RESET_ALL, end=' ')
try:
first = next(diff)
if with_res != '': print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OPTIMIZED' + Style.RESET_ALL, end=' ')
results_file[name].optimized = True
results_file[name].nres_code_size = solution_size
except StopIteration:
print('Unchanged', end=' ')
def run_micro_version(name, logfile, version, opts, set_time):
'''Run benchmark using enumerate-and-check version of synquid'''
start = time.time()
logfile.seek(0, os.SEEK_END)
# Run Synquid on the benchmark, mute output:
synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
end = time.time()
print('{0:0.2f}'.format(end - start), end = ' ')
if synthesis_res.returncode == 124: # Timeout: record timeout
print(Back.RED + Fore.RED + Style.BRIGHT + version + 'TIMEOUT' + Style.RESET_ALL, end = ' ')
set_time('TO')
#results_file[name].field = 'TO'
#results_file[name].eac_time = 'TO'
elif synthesis_res.returncode: # Synthesis failed: record failure
print(Back.RED + Fore.RED + Style.BRIGHT + version + 'FAIL' + Style.RESET_ALL, end = ' ')
set_time(-2)
#results_file[name].field = -2
#results_file[name].eac_time = -2
else: # Synthesis succeeded: record time for variant
set_time(end - start)
#results_file[name].field = end - start
#results_file[name].eac_time = (end - start)
print(Back.GREEN + Fore.GREEN + Style.BRIGHT + version + 'OK' + Style.RESET_ALL, end=' ')
def format_time(t):
if isinstance(t, str):
return t
elif t < 0:
return '-'
else:
return '{0:0.2f}'.format(t)
def write_micro_csv():
'''Generate CSV file for micro benchmark'''
with open(MICRO_CSV_FILE, 'w') as outfile:
for b in MICRO_BENCHMARKS:
outfile.write (b.name + ',')
result = micro_results [b.name]
optstr = 'True' if result.optimized else '-'
outfile.write (result.spec_size + ',')
outfile.write (result.code_size + ',')
outfile.write (format_time(result.time) + ',')
outfile.write (format_time(result.nres_time) + ',')
#outfile.write (result.eac_time + ',')
outfile.write (format_time(result.eac_time) + ',')
outfile.write (format_time(result.incremental_time) + ',')
outfile.write (result.nres_code_size + ',')
#outfile.write (optstr + ',')
outfile.write ('\n')
def write_csv():
'''Generate CSV file from the results dictionary'''
with open(CSV_FILE, 'w') as outfile:
for group in groups:
for b in group.benchmarks:
outfile.write (b.name + ',')
result = results [b.name]
optstr = 'True' if result.optimized else '-'
outfile.write (result.spec_size + ',')
outfile.write (result.code_size + ',')
outfile.write (format_time(result.time) + ',')
outfile.write (format_time(result.nres_time) + ',')
outfile.write (result.nres_code_size + ',')
#outfile.write (result.eac_time + ',')
#outfile.write (optstr + ',')
outfile.write ('\n')
def write_micro_latex():
'''Generate Latex table from the results dictionary'''
total_count = 0
to_def = 0
to_nres = 0
with open(MICRO_LATEX_FILE, 'w') as outfile:
rownum = 1
for b in MICRO_BENCHMARKS:
result = micro_results [b.name]
optstr = 'Yes' if result.optimized else '-'
row = str(rownum) +\
' & ' + b.description +\
' & ' + b.signature + \
' & ' + str(b.components) + \
' & ' + format_time(result.time) + \
' & ' + format_time(result.nres_time) + \
' & ' + format_time(result.eac_time) + \
' & ' + format_time(result.incremental_time) + \
' & ' + b.complexity + \
' & ' + b.complexity_nr + ' \\\\'
#format_time(result.eac_time) + \
#' & ' + result.nres_code_size + ' \\\\'
#' & ' + str(b.num_programs) + \
#' & ' + str(result.eac_time) + ' \\\\'
#' & ' + optstr + ' \\\\'
outfile.write (row)
outfile.write ('\n')
rownum = rownum + 1
total_count = total_count + 1
print('Total:', total_count)
def write_latex():
'''Generate Latex table from the results dictionary'''
total_count = 0
to_def = 0
to_nres = 0
with open(LATEX_FILE, 'w') as outfile:
for group in groups:
outfile.write ('\multirow{')
outfile.write (str(group.benchmarks.__len__()))
outfile.write ('}{*}{\\parbox{1cm}{\\vspace{-0.85\\baselineskip}\center{')
outfile.write (group.name)
outfile.write ('}}}')
for b in group.benchmarks:
result = results [b.name]
optstr = 'Yes' if result.optimized else '-'
row = (
' & ' + b.description +
# ' & ' + result.goal_count + \
' & ' + str(b.components) +
#' & ' + result.measure_count + \
' & ' + result.code_size +
' & ' + format_time(result.time) +
' & ' + format_time(result.nres_time) + ' \\\\'
#' & ' + result.nres_code_size + \
#' & ' + str(b.num_programs) + \
#' & ' + format_time(result.eac_time) + ' \\\\'
#' & ' + optstr + ' \\\\'
)
outfile.write (row)
outfile.write ('\n')
total_count = total_count + 1
if result.nres_time < 0.0:
to_nres = to_nres + 1
outfile.write ('\\hline')
print('Total:', total_count)
print('TO nres:', to_nres)
def cmdline():
import argparse
a = argparse.ArgumentParser()
a.add_argument('--medium', action='store_true')
a.add_argument('--small', action='store_true')
a.add_argument('--rerun', nargs=1, help='Rerun given benchmark')
a.add_argument('--rerun-micro', nargs=1, help='Rerun given micro benchmark')
return a.parse_args()
if __name__ == '__main__':
init()
cl_opts = cmdline()
# Check if there are serialized results
if os.path.isfile(DUMPFILE):
results = pickle.load(open(DUMPFILE, 'rb'))
else:
results = dict()
if os.path.isfile(MICRO_DUMPFILE):
micro_results = pickle.load(open(MICRO_DUMPFILE, 'rb'))
else:
micro_results = dict()
# Delete old log file
if os.path.isfile(LOGFILE):
os.remove(LOGFILE)
if os.path.isfile(MICRO_LOGFILE):
os.remove(MICRO_LOGFILE)
# Run experiments
groups = ALL_BENCHMARKS[:1] if cl_opts.small else ALL_BENCHMARKS
if cl_opts.rerun:
bs = [b for g in groups for b in g if b.name == cl_opts.rerun[0]]
for b in bs:
print(b.str())
run_benchmark(b.name, b.options, [])
with open(DUMPFILE, 'wb') as data_dump:
pickle.dump(results, data_dump)
if cl_opts.rerun_micro:
bs = [b for b in MICRO_BENCHMARKS if b.name == cl_opts.rerun_micro[0]]
for b in bs:
print(b.str())
run_micro_benchmark(b.name, b.options, [], b.eac, b.incremental)
with open(MICRO_DUMPFILE, 'wb') as data_dump:
pickle.dump(micro_results, data_dump)
else:
for group in groups:
for b in group.benchmarks:
if b.name in results:
print(b.str() + Back.YELLOW + Fore.YELLOW + Style.BRIGHT + 'SKIPPED' + Style.RESET_ALL)
else:
print(b.str())
run_benchmark(b.name, b.options, group.default_options)
with open(DUMPFILE, 'wb') as data_dump:
pickle.dump(results, data_dump)
for b in MICRO_BENCHMARKS:
if b.name in micro_results:
print(b.str() + Back.YELLOW + Fore.YELLOW + Style.BRIGHT + 'SKIPPED' + Style.RESET_ALL)
else:
print(b.str())
run_micro_benchmark(b.name, b.options, [], b.eac, b.incremental)
with open(MICRO_DUMPFILE, 'wb') as data_dump:
pickle.dump(micro_results, data_dump)
med_slowdown = median([results[b.name].pct_slowdown for g in groups for b in g.benchmarks])
print('Median slowdown = ' + str(med_slowdown))
# Generate CSV
write_csv()
# Generate Latex table
write_latex()
write_micro_csv()
write_micro_latex()
# Compare with previous solutions and print the diff
if os.path.isfile(ORACLE_FILE) and (not cl_opts.small):
fromlines = open(ORACLE_FILE).readlines()
tolines = open(LOGFILE, 'U').readlines()
diff = difflib.unified_diff(fromlines, tolines, n=0)
print()
sys.stdout.writelines(diff)
if os.path.isfile(MICRO_ORACLE_FILE) and (not cl_opts.small):
fromlines = open(MICRO_ORACLE_FILE).readlines()
tolines = open(MICRO_LOGFILE, 'U').readlines()
diff = difflib.unified_diff(fromlines, tolines, n=0)
print()
sys.stdout.writelines(diff)
# Copy results to paper directory
shutil.copy('./' + LATEX_FILE, PAPER_PATH + LATEX_FILE)
shutil.copy('./' + MICRO_LATEX_FILE, PAPER_PATH + MICRO_LATEX_FILE)
| [
"pickle.dump",
"argparse.ArgumentParser",
"subprocess.run",
"re.match",
"difflib.unified_diff",
"os.path.isfile",
"statistics.median",
"platform.system",
"shutil.copy",
"time.time",
"sys.stdout.writelines",
"colorama.init",
"os.remove"
] | [((252, 269), 'platform.system', 'platform.system', ([], {}), '()\n', (267, 269), False, 'import platform\n'), ((21296, 21307), 'time.time', 'time.time', ([], {}), '()\n', (21305, 21307), False, 'import time\n'), ((21410, 21552), 'subprocess.run', 'run', (["(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + variant_opts + [name +\n '.sq'])"], {'stdout': 'PIPE', 'stderr': 'PIPE', 'universal_newlines': '(True)'}), "(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + variant_opts + [\n name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)\n", (21413, 21552), False, 'from subprocess import run, PIPE\n'), ((21566, 21577), 'time.time', 'time.time', ([], {}), '()\n', (21575, 21577), False, 'import time\n'), ((23169, 23180), 'time.time', 'time.time', ([], {}), '()\n', (23178, 23180), False, 'import time\n'), ((23283, 23416), 'subprocess.run', 'run', (["(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + opts + [name + '.sq'])"], {'stdout': 'PIPE', 'stderr': 'PIPE', 'universal_newlines': '(True)'}), "(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + opts + [name +\n '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)\n", (23286, 23416), False, 'from subprocess import run, PIPE\n'), ((23423, 23434), 'time.time', 'time.time', ([], {}), '()\n', (23432, 23434), False, 'import time\n'), ((28882, 28907), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (28905, 28907), False, 'import argparse\n'), ((29219, 29225), 'colorama.init', 'init', ([], {}), '()\n', (29223, 29225), False, 'from colorama import init, Fore, Back, Style\n'), ((29303, 29327), 'os.path.isfile', 'os.path.isfile', (['DUMPFILE'], {}), '(DUMPFILE)\n', (29317, 29327), False, 'import os, os.path\n'), ((29424, 29454), 'os.path.isfile', 'os.path.isfile', (['MICRO_DUMPFILE'], {}), '(MICRO_DUMPFILE)\n', (29438, 29454), False, 'import os, os.path\n'), ((29595, 29618), 'os.path.isfile', 'os.path.isfile', (['LOGFILE'], {}), '(LOGFILE)\n', (29609, 29618), False, 'import os, os.path\n'), ((29653, 29682), 'os.path.isfile', 'os.path.isfile', (['MICRO_LOGFILE'], {}), '(MICRO_LOGFILE)\n', (29667, 29682), False, 'import os, os.path\n'), ((31371, 31447), 'statistics.median', 'median', (['[results[b.name].pct_slowdown for g in groups for b in g.benchmarks]'], {}), '([results[b.name].pct_slowdown for g in groups for b in g.benchmarks])\n', (31377, 31447), False, 'from statistics import median\n'), ((32293, 32348), 'shutil.copy', 'shutil.copy', (["('./' + LATEX_FILE)", '(PAPER_PATH + LATEX_FILE)'], {}), "('./' + LATEX_FILE, PAPER_PATH + LATEX_FILE)\n", (32304, 32348), False, 'import shutil\n'), ((32353, 32420), 'shutil.copy', 'shutil.copy', (["('./' + MICRO_LATEX_FILE)", '(PAPER_PATH + MICRO_LATEX_FILE)'], {}), "('./' + MICRO_LATEX_FILE, PAPER_PATH + MICRO_LATEX_FILE)\n", (32364, 32420), False, 'import shutil\n'), ((16248, 16259), 'time.time', 'time.time', ([], {}), '()\n', (16257, 16259), False, 'import time\n'), ((16388, 16537), 'subprocess.run', 'run', (["(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS + opts +\n [name + '.sq'])"], {'stdout': 'PIPE', 'stderr': 'PIPE', 'universal_newlines': '(True)'}), "(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS +\n opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)\n", (16391, 16537), False, 'from subprocess import run, PIPE\n'), ((16546, 16557), 'time.time', 'time.time', ([], {}), '()\n', (16555, 16557), False, 'import time\n'), ((18479, 18490), 'time.time', 'time.time', ([], {}), '()\n', (18488, 18490), False, 'import time\n'), ((18619, 18768), 'subprocess.run', 'run', (["(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS + opts +\n [name + '.sq'])"], {'stdout': 'PIPE', 'stderr': 'PIPE', 'universal_newlines': '(True)'}), "(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS +\n opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)\n", (18622, 18768), False, 'from subprocess import run, PIPE\n'), ((18777, 18788), 'time.time', 'time.time', ([], {}), '()\n', (18786, 18788), False, 'import time\n'), ((29626, 29644), 'os.remove', 'os.remove', (['LOGFILE'], {}), '(LOGFILE)\n', (29635, 29644), False, 'import os, os.path\n'), ((29690, 29714), 'os.remove', 'os.remove', (['MICRO_LOGFILE'], {}), '(MICRO_LOGFILE)\n', (29699, 29714), False, 'import os, os.path\n'), ((31693, 31720), 'os.path.isfile', 'os.path.isfile', (['ORACLE_FILE'], {}), '(ORACLE_FILE)\n', (31707, 31720), False, 'import os, os.path\n'), ((31860, 31905), 'difflib.unified_diff', 'difflib.unified_diff', (['fromlines', 'tolines'], {'n': '(0)'}), '(fromlines, tolines, n=0)\n', (31880, 31905), False, 'import difflib\n'), ((31930, 31957), 'sys.stdout.writelines', 'sys.stdout.writelines', (['diff'], {}), '(diff)\n', (31951, 31957), False, 'import sys\n'), ((31966, 31999), 'os.path.isfile', 'os.path.isfile', (['MICRO_ORACLE_FILE'], {}), '(MICRO_ORACLE_FILE)\n', (31980, 31999), False, 'import os, os.path\n'), ((32151, 32196), 'difflib.unified_diff', 'difflib.unified_diff', (['fromlines', 'tolines'], {'n': '(0)'}), '(fromlines, tolines, n=0)\n', (32171, 32196), False, 'import difflib\n'), ((32221, 32248), 'sys.stdout.writelines', 'sys.stdout.writelines', (['diff'], {}), '(diff)\n', (32242, 32248), False, 'import sys\n'), ((30070, 30101), 'pickle.dump', 'pickle.dump', (['results', 'data_dump'], {}), '(results, data_dump)\n', (30081, 30101), False, 'import pickle\n'), ((30408, 30445), 'pickle.dump', 'pickle.dump', (['micro_results', 'data_dump'], {}), '(micro_results, data_dump)\n', (30419, 30445), False, 'import pickle\n'), ((17165, 17213), 're.match', 're.match', (['"""\\\\(Goals: (\\\\d+)\\\\).*$"""', 'lastLines[0]'], {}), "('\\\\(Goals: (\\\\d+)\\\\).*$', lastLines[0])\n", (17173, 17213), False, 'import re\n'), ((17246, 17297), 're.match', 're.match', (['"""\\\\(Measures: (\\\\d+)\\\\).*$"""', 'lastLines[1]'], {}), "('\\\\(Measures: (\\\\d+)\\\\).*$', lastLines[1])\n", (17254, 17297), False, 'import re\n'), ((17326, 17378), 're.match', 're.match', (['"""\\\\(Spec size: (\\\\d+)\\\\).*$"""', 'lastLines[2]'], {}), "('\\\\(Spec size: (\\\\d+)\\\\).*$', lastLines[2])\n", (17334, 17378), False, 'import re\n'), ((17411, 17467), 're.match', 're.match', (['"""\\\\(Solution size: (\\\\d+)\\\\).*$"""', 'lastLines[3]'], {}), "('\\\\(Solution size: (\\\\d+)\\\\).*$', lastLines[3])\n", (17419, 17467), False, 'import re\n'), ((17502, 17575), 're.match', 're.match', (['"""\\\\(Number of resource constraints: (\\\\d+)\\\\).*$"""', 'lastLines[4]'], {}), "('\\\\(Number of resource constraints: (\\\\d+)\\\\).*$', lastLines[4])\n", (17510, 17575), False, 'import re\n'), ((19402, 19450), 're.match', 're.match', (['"""\\\\(Goals: (\\\\d+)\\\\).*$"""', 'lastLines[0]'], {}), "('\\\\(Goals: (\\\\d+)\\\\).*$', lastLines[0])\n", (19410, 19450), False, 'import re\n'), ((19483, 19534), 're.match', 're.match', (['"""\\\\(Measures: (\\\\d+)\\\\).*$"""', 'lastLines[1]'], {}), "('\\\\(Measures: (\\\\d+)\\\\).*$', lastLines[1])\n", (19491, 19534), False, 'import re\n'), ((19563, 19615), 're.match', 're.match', (['"""\\\\(Spec size: (\\\\d+)\\\\).*$"""', 'lastLines[2]'], {}), "('\\\\(Spec size: (\\\\d+)\\\\).*$', lastLines[2])\n", (19571, 19615), False, 'import re\n'), ((19648, 19704), 're.match', 're.match', (['"""\\\\(Solution size: (\\\\d+)\\\\).*$"""', 'lastLines[3]'], {}), "('\\\\(Solution size: (\\\\d+)\\\\).*$', lastLines[3])\n", (19656, 19704), False, 'import re\n'), ((19739, 19812), 're.match', 're.match', (['"""\\\\(Number of resource constraints: (\\\\d+)\\\\).*$"""', 'lastLines[4]'], {}), "('\\\\(Number of resource constraints: (\\\\d+)\\\\).*$', lastLines[4])\n", (19747, 19812), False, 'import re\n'), ((22198, 22254), 're.match', 're.match', (['"""\\\\(Solution size: (\\\\d+)\\\\).*$"""', 'lastLines[3]'], {}), "('\\\\(Solution size: (\\\\d+)\\\\).*$', lastLines[3])\n", (22206, 22254), False, 'import re\n'), ((31313, 31350), 'pickle.dump', 'pickle.dump', (['micro_results', 'data_dump'], {}), '(micro_results, data_dump)\n', (31324, 31350), False, 'import pickle\n'), ((30889, 30920), 'pickle.dump', 'pickle.dump', (['results', 'data_dump'], {}), '(results, data_dump)\n', (30900, 30920), False, 'import pickle\n')] |
import sys
import signal
from clint.textui import colored, puts
from downloader import Downloader
from extractor import Extractor
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
def main():
downloader = Downloader()
extractor = Extractor()
url = "https://pornhub.com"
puts(colored.green("getting video keys."))
main_page = downloader.get(url)
view_keys = extractor.get_viewkeys(main_page)
puts(colored.green("starting to download videos."))
for key in view_keys:
puts(colored.green("getting video information."))
absolute_url = "https://pornhub.com/view_video.php?viewkey=" + key
page = downloader.get(absolute_url)
info = extractor.get_video_info(page)
if info is None:
continue
hd_quality = info['mediaDefinitions'][0]
puts(colored.green("downloading video %s." % info['video_title']))
downloader.save_file(hd_quality["videoUrl"], info['video_title'] + ".mp4")
if __name__ == "__main__":
main()
| [
"extractor.Extractor",
"sys.exit",
"clint.textui.colored.green",
"downloader.Downloader"
] | [((217, 229), 'downloader.Downloader', 'Downloader', ([], {}), '()\n', (227, 229), False, 'from downloader import Downloader\n'), ((246, 257), 'extractor.Extractor', 'Extractor', ([], {}), '()\n', (255, 257), False, 'from extractor import Extractor\n'), ((173, 184), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (181, 184), False, 'import sys\n'), ((300, 336), 'clint.textui.colored.green', 'colored.green', (['"""getting video keys."""'], {}), "('getting video keys.')\n", (313, 336), False, 'from clint.textui import colored, puts\n'), ((434, 479), 'clint.textui.colored.green', 'colored.green', (['"""starting to download videos."""'], {}), "('starting to download videos.')\n", (447, 479), False, 'from clint.textui import colored, puts\n'), ((520, 563), 'clint.textui.colored.green', 'colored.green', (['"""getting video information."""'], {}), "('getting video information.')\n", (533, 563), False, 'from clint.textui import colored, puts\n'), ((840, 900), 'clint.textui.colored.green', 'colored.green', (["('downloading video %s.' % info['video_title'])"], {}), "('downloading video %s.' % info['video_title'])\n", (853, 900), False, 'from clint.textui import colored, puts\n')] |
from cryptography.fernet import Fernet
import os
import discord
import aiohttp
import secrets
from urllib.parse import quote
from dotenv import load_dotenv
load_dotenv()
class OAuth:
def __init__(self):
# User Provided Data
self.client_id = os.getenv("CID")
self.client_secret = os.getenv("CIS")
self.scope = ["identify", "guilds"]
# Generated Data
self.redirect_uri = "http://localhost:5000/callback"
self.oauth_base = "https://discord.com/api/oauth2"
self.discord_login_url = self.generate_login_url()
self.discord_token_url = self.oauth_base+"/token"
self.discord_api_url = "https://discord.com/api"
self.bot_invite_url = self.generate_bot_invite_url()
self.client = None
async def create_client(self):
self.client = aiohttp.ClientSession()
def generate_login_url(self):
# Generate a login url, used during authorization
return self.oauth_base+"/authorize?client_id={}&redirect_uri={}&response_type=code&scope={}".format(
self.client_id, quote(self.redirect_uri), quote(" ".join(self.scope)))
def generate_bot_invite_url(self):
# Generate a bot invite url, used in the guild selection page
return self.oauth_base+"/authorize?client_id={}&permissions=0&redirect_uri={}&scope=bot".format(
self.client_id, quote(self.redirect_uri))
async def getaccesstoken(self, code):
# Get a user access token required to make api calls with discord
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': self.redirect_uri,
'scope': " ".join(self.scope)
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
token = await self.client.post(self.discord_token_url,
data=data, headers=headers)
return await token.json()
async def refreshtoken(self, refresh_token):
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
'refresh_token': refresh_token
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
r = await self.client.post('%s/oauth2/token' % self.discord_api_url, data=data, headers=headers)
return await r.json()
async def getuser(self, accesstoken):
# Fetch user using an access token
url = self.discord_api_url+"/users/@me"
headers = {
'Authorization': 'Bearer {}'.format(accesstoken)
}
payload = await self.client.get(url, headers=headers)
data = await payload.json()
user = User(
name=data["username"],
userid=data["id"],
discriminator=data["discriminator"],
avatar=data["avatar"]
)
return user
async def getuserguilds(self, accesstoken):
# Fetch user guilds using an access token
url = self.discord_api_url+"/users/@me/guilds"
headers = {
'Authorization': 'Bearer {}'.format(accesstoken)
}
payload = await self.client.get(url, headers=headers)
data = await payload.json()
guilds = [
Guild(
name=i["name"],
guildid=i["id"],
features=i["features"],
icon=i["icon"],
owner=i["owner"],
permissions=i["permissions"]
) for i in data
]
return guilds
# Abstract Classes
class User:
'''
Encryption System:
On First Authentication:
key, session_id
[Server] -----------------> [Client]
* Server encrypts the access token using a special key,
tells the client about the key and then forgets it.
On succeding requests:
key, session_id
[Client] -----------------> [Server]
* Client requests the corresponding session from the server,
verifies their integrity using the special key. If the key
is invalid, server resets their session in order to protect
fraudulent attempts.
'''
def __init__(self, name, discriminator, userid, avatar):
self.name = name
self.discriminator = int(discriminator)
self.id = int(userid)
self.avatar_hash = avatar
self.session_id = None
self.access_token = None
def __repr__(self):
return f"{self.name}#{self.discriminator}"
def __str__(self):
return f"{self.name}#{self.discriminator}"
def avatar_url(self, size=256):
return f"https://cdn.discordapp.com/avatars/{self.id}/{self.avatar_hash}.png?size={size}"
def retrieve_access_token(self, key):
F = Fernet(key.encode("utf-8"))
try:
return F.decrypt(self.access_token)
except:
return False
def set_access_token(self, access_token):
key = Fernet.generate_key()
F = Fernet(key)
self.access_token = F.encrypt(access_token)
return key.decode("utf-8")
def create_session(self, access_token):
self.session_id=secrets.token_urlsafe(nbytes=16)
self.set_access_token(access_token)
class Guild:
def __init__(self, name, guildid, features, icon, owner, permissions):
self.name = name
self.id = int(guildid)
self.features = features
self.icon_hash = icon
self.is_owner = owner
self.permissions = discord.Permissions(permissions=int(permissions))
def __repr__(self):
return f"{self.name}"
def __str__(self):
return f"{self.name}"
def icon_url(self, size=256):
return f"https://cdn.discordapp.com/icons/{self.id}/{self.icon_hash}.png?size={size}"
class SessionHandler:
'''
This class will handle caching and loading dashboard sessions
it will be exposed via the OAuth Object, Mongo DB is required
'''
pass | [
"aiohttp.ClientSession",
"os.getenv",
"secrets.token_urlsafe",
"urllib.parse.quote",
"dotenv.load_dotenv",
"cryptography.fernet.Fernet",
"cryptography.fernet.Fernet.generate_key"
] | [((156, 169), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (167, 169), False, 'from dotenv import load_dotenv\n'), ((262, 278), 'os.getenv', 'os.getenv', (['"""CID"""'], {}), "('CID')\n", (271, 278), False, 'import os\n'), ((308, 324), 'os.getenv', 'os.getenv', (['"""CIS"""'], {}), "('CIS')\n", (317, 324), False, 'import os\n'), ((835, 858), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (856, 858), False, 'import aiohttp\n'), ((5247, 5268), 'cryptography.fernet.Fernet.generate_key', 'Fernet.generate_key', ([], {}), '()\n', (5266, 5268), False, 'from cryptography.fernet import Fernet\n'), ((5281, 5292), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (5287, 5292), False, 'from cryptography.fernet import Fernet\n'), ((5449, 5481), 'secrets.token_urlsafe', 'secrets.token_urlsafe', ([], {'nbytes': '(16)'}), '(nbytes=16)\n', (5470, 5481), False, 'import secrets\n'), ((1089, 1113), 'urllib.parse.quote', 'quote', (['self.redirect_uri'], {}), '(self.redirect_uri)\n', (1094, 1113), False, 'from urllib.parse import quote\n'), ((1387, 1411), 'urllib.parse.quote', 'quote', (['self.redirect_uri'], {}), '(self.redirect_uri)\n', (1392, 1411), False, 'from urllib.parse import quote\n')] |
from __future__ import division
import numpy as np
def SoftmaxLoss2(w, X, y, k):
# w(feature*class,1) - weights for last class assumed to be 0
# X(instance,feature)
# y(instance,1)
#
# version of SoftmaxLoss where weights for last class are fixed at 0
# to avoid overparameterization
n, p = X.shape
w = w.reshape((p, k - 1))
w = np.hstack((w, np.zeros((p, 1))))
Z = np.exp(X.dot(w)).sum(axis=1)
nll = -((X * w[:, y].T).sum(axis=1) - np.log(Z)).sum()
g = np.zeros((p, k - 1))
for c in xrange(k - 1):
g[:, c] = -(X * ((y == c) - np.exp(X.dot(w[:, c])) / Z)
[:, np.newaxis]).sum(axis=0)
g = np.ravel(g)
return nll, g
| [
"numpy.ravel",
"numpy.zeros",
"numpy.log"
] | [((509, 529), 'numpy.zeros', 'np.zeros', (['(p, k - 1)'], {}), '((p, k - 1))\n', (517, 529), True, 'import numpy as np\n'), ((684, 695), 'numpy.ravel', 'np.ravel', (['g'], {}), '(g)\n', (692, 695), True, 'import numpy as np\n'), ((384, 400), 'numpy.zeros', 'np.zeros', (['(p, 1)'], {}), '((p, 1))\n', (392, 400), True, 'import numpy as np\n'), ((483, 492), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (489, 492), True, 'import numpy as np\n')] |
# Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
from os import path
def test_config(testing_config):
with testing_config() as config:
assert config.filename().endswith('rosshm.ini')
assert config.getbool('debug')
with testing_config(init = False):
config.init(fn = None)
assert config.getbool('debug')
def test_database(testing_config):
with testing_config() as config:
db = config.database()
assert db['driver'] == 'sqlite'
assert db['name'] == ':memory:'
assert db['config'] == ''
with testing_config() as config:
fn = path.join(path.sep, 'testing', 'db.cfg')
config._cfg.set('rosshm', 'db.config', fn)
db = config.database()
assert db['config'] == fn
| [
"os.path.join"
] | [((551, 591), 'os.path.join', 'path.join', (['path.sep', '"""testing"""', '"""db.cfg"""'], {}), "(path.sep, 'testing', 'db.cfg')\n", (560, 591), False, 'from os import path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 09 february, 2021
Testing suite for BNetwork class
@author: <NAME>
@email: <EMAIL>
@date: 09 february, 2021
"""
import unittest
import os
import numpy as np
from topopy import Flow, Basin, Network, BNetwork, DEM
from topopy.network import NetworkError
infolder = "data/in"
outfolder = "data/out"
class BNetworkClassTest(unittest.TestCase):
# Indices de las cabeceras que deben de salir (para comprobar)
results = {"small25":dict([(1, 16171), (2, 9354), (3,1463)]),
"jebja30":dict([(1, 151755), (2, 44786), (3, 48709), (4, 3819)]),
"tunez":dict([(1, 77552), (2, 30013), (3, 7247)])}
def test_BNetwork_class00(self):
"""
Test00 Crea BNetwork para cuencas de prueba a partir de un Grid de cuencas
Sin utilizar cabeceras
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
# Cargamos outlets y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, file), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
for bid in np.unique(cuencas.read_array()):
if bid == 0:
continue
bnet = BNetwork(net, cuencas, None, bid)
self.assertEqual(int(bnet._heads[0]), self.results[file][bid])
def test_BNetwork_class01(self):
"""
Test00 Crea BNetwork para cuencas de prueba a partir de un objeto Basin
Sin utilizar cabeceras
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
dem = DEM("{}/{}.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
# Cargamos outlets y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, file), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
for bid in np.unique(cuencas.read_array()):
if bid == 0:
continue
basin = Basin(dem, cuencas, bid)
bnet = BNetwork(net, basin)
# Este test solo verifica que se realice sin fallos y que
# el objeto bnet tiene una única cabecera
bnet = BNetwork(net, cuencas, None, bid)
self.assertEqual(int(bnet._heads[0]), self.results[file][bid])
def test_BNetwork_class03(self):
"""
Test que prueba cabeceras en cuenca 1 con small25
474260.9;4114339.6;3
474856.9;4114711.1;2
"""
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, "small25"))
net = Network("{}/{}_net.dat".format(infolder, "small25"))
# Cargamos outlets, heads y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, "small25"), delimiter=";")
heads = np.loadtxt("{}/{}_bnet_heads.txt".format(infolder, "small25"), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
bid = 1
bnet = BNetwork(net, cuencas, heads, bid)
self.assertEqual(np.array_equal(bnet._heads, np.array([13494, 16171])), True)
def test_BNetwork_class04(self):
"""
Test que prueba cabeceras en cuenca 1 con small25 (sin utilizar id field)
474260.9;4114339.6;3
474856.9;4114711.1;2
"""
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, "small25"))
net = Network("{}/{}_net.dat".format(infolder, "small25"))
# Cargamos outlets, heads y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, "small25"), delimiter=";")
heads = np.loadtxt("{}/{}_bnet_heads.txt".format(infolder, "small25"), delimiter=";")
# Remove the id column
heads = heads[:,:-1]
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
bid = 1
bnet = BNetwork(net, cuencas, heads, bid)
self.assertEqual(np.array_equal(bnet._heads, np.array([16171, 13494])), True)
def test_BNetwork_class05(self):
"""
Test de creado masivo de cuencas con cabeceras aleatorias
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
dem = DEM("{}/{}.tif".format(infolder, file))
# Generamos todas las cuencas
cuencas = fd.get_drainage_basins(min_area = 0.0025)
# Generamos 50 puntos aleatorios dentro de la extensión del objeto Network
# Estos 50 puntos se usaran como cabeceras
xmin, xmax, ymin, ymax = net.get_extent()
xi = np.random.randint(xmin, xmax, 50)
yi = np.random.randint(ymin, ymax, 50)
heads = np.array((xi, yi)).T
# Cogemos 5 cuencas aleatorias
bids = np.random.choice(np.unique(cuencas.read_array())[1:], 5)
for bid in bids:
try:
if np.random.randint(100) < 70:
bnet = BNetwork(net, cuencas, heads, bid)
else:
basin = Basin(dem, cuencas, bid)
bnet = BNetwork(net, basin, heads)
except NetworkError:
print("Network of {} file inside the basin {} has not enough pixels".format(file, bid))
continue
# Salvamos BNetwork y volvemos a cargar para comprobar que se cargan-guardan bien
bnet_path = "{}/{}_{}_bnet.dat".format(outfolder, file, bid)
bnet.save(bnet_path)
bnet2 = BNetwork(bnet_path)
computed = np.array_equal(bnet._ix, bnet2._ix)
self.assertEqual(computed, True)
# borramos archivo
os.remove(bnet_path)
if __name__ == "__main__":
unittest.main() | [
"topopy.BNetwork",
"os.remove",
"numpy.array",
"numpy.random.randint",
"numpy.array_equal",
"unittest.main",
"topopy.Basin"
] | [((6743, 6758), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6756, 6758), False, 'import unittest\n'), ((3650, 3684), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'heads', 'bid'], {}), '(net, cuencas, heads, bid)\n', (3658, 3684), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((4599, 4633), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'heads', 'bid'], {}), '(net, cuencas, heads, bid)\n', (4607, 4633), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((5511, 5544), 'numpy.random.randint', 'np.random.randint', (['xmin', 'xmax', '(50)'], {}), '(xmin, xmax, 50)\n', (5528, 5544), True, 'import numpy as np\n'), ((5562, 5595), 'numpy.random.randint', 'np.random.randint', (['ymin', 'ymax', '(50)'], {}), '(ymin, ymax, 50)\n', (5579, 5595), True, 'import numpy as np\n'), ((1528, 1561), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'None', 'bid'], {}), '(net, cuencas, None, bid)\n', (1536, 1561), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((2558, 2582), 'topopy.Basin', 'Basin', (['dem', 'cuencas', 'bid'], {}), '(dem, cuencas, bid)\n', (2563, 2582), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((2606, 2626), 'topopy.BNetwork', 'BNetwork', (['net', 'basin'], {}), '(net, basin)\n', (2614, 2626), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((2782, 2815), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'None', 'bid'], {}), '(net, cuencas, None, bid)\n', (2790, 2815), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((3738, 3762), 'numpy.array', 'np.array', (['[13494, 16171]'], {}), '([13494, 16171])\n', (3746, 3762), True, 'import numpy as np\n'), ((4687, 4711), 'numpy.array', 'np.array', (['[16171, 13494]'], {}), '([16171, 13494])\n', (4695, 4711), True, 'import numpy as np\n'), ((5616, 5634), 'numpy.array', 'np.array', (['(xi, yi)'], {}), '((xi, yi))\n', (5624, 5634), True, 'import numpy as np\n'), ((6506, 6525), 'topopy.BNetwork', 'BNetwork', (['bnet_path'], {}), '(bnet_path)\n', (6514, 6525), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((6553, 6588), 'numpy.array_equal', 'np.array_equal', (['bnet._ix', 'bnet2._ix'], {}), '(bnet._ix, bnet2._ix)\n', (6567, 6588), True, 'import numpy as np\n'), ((6689, 6709), 'os.remove', 'os.remove', (['bnet_path'], {}), '(bnet_path)\n', (6698, 6709), False, 'import os\n'), ((5842, 5864), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (5859, 5864), True, 'import numpy as np\n'), ((5902, 5936), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'heads', 'bid'], {}), '(net, cuencas, heads, bid)\n', (5910, 5936), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((5995, 6019), 'topopy.Basin', 'Basin', (['dem', 'cuencas', 'bid'], {}), '(dem, cuencas, bid)\n', (6000, 6019), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((6051, 6078), 'topopy.BNetwork', 'BNetwork', (['net', 'basin', 'heads'], {}), '(net, basin, heads)\n', (6059, 6078), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_useless_aliases(apps, schema_editor):
"""Remove aliases linked to disabled messages."""
ARmessage = apps.get_model("modoboa_postfix_autoreply", "ARmessage")
AliasRecipient = apps.get_model("admin", "AliasRecipient")
qset = ARmessage.objects.select_related("mbox", "mbox__domain").filter(
enabled=False)
for armessage in qset:
alr_address = u"{0}@{1}@autoreply.{1}".format(
armessage.mbox.address, armessage.mbox.domain)
try:
alr = AliasRecipient.objects.get(address=alr_address)
except AliasRecipient.DoesNotExist:
continue
alias = alr.alias
alr.delete()
if not alias.recipients_count:
alias.delete()
class Migration(migrations.Migration):
dependencies = [
('modoboa_postfix_autoreply', '0004_delete_alias'),
]
operations = [
migrations.RunPython(remove_useless_aliases),
]
| [
"django.db.migrations.RunPython"
] | [((1001, 1045), 'django.db.migrations.RunPython', 'migrations.RunPython', (['remove_useless_aliases'], {}), '(remove_useless_aliases)\n', (1021, 1045), False, 'from django.db import migrations, models\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from util import log
from pprint import pprint
from input_ops import create_input_ops
from model import Model
import os
import time
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
class Trainer(object):
def __init__(self,
config,
dataset,
dataset_test):
self.config = config
hyper_parameter_str = 'bs_{}_lr_flow_{}_pixel_{}_d_{}'.format(
config.batch_size,
config.learning_rate_f,
config.learning_rate_p,
config.learning_rate_d,
)
self.train_dir = './train_dir/%s-%s-%s-num_input-%s-%s' % (
config.dataset,
config.prefix,
hyper_parameter_str,
str(config.num_input),
time.strftime("%Y%m%d-%H%M%S")
)
if not os.path.exists(self.train_dir): os.makedirs(self.train_dir)
log.infov("Train Dir: %s", self.train_dir)
# --- input ops ---
self.batch_size = config.batch_size
_, self.batch_train = create_input_ops(
dataset, self.batch_size, is_training=True)
_, self.batch_test = create_input_ops(
dataset_test, self.batch_size, is_training=False)
# --- create model ---
self.model = Model(config)
# --- optimizer ---
self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
self.learning_rate_p = config.learning_rate_p
self.learning_rate_f = config.learning_rate_f
self.learning_rate_d = config.learning_rate_d
self.check_op = tf.no_op()
# --- checkpoint and monitoring ---
all_vars = tf.trainable_variables()
f_var = [v for v in all_vars if 'Flow' in v.op.name or 'flow' in v.op.name]
log.warn("********* f_var ********** ")
slim.model_analyzer.analyze_vars(f_var, print_info=True)
p_var = [v for v in all_vars if 'Pixel' in v.op.name or 'pixel' in v.op.name]
log.warn("********* p_var ********** ")
slim.model_analyzer.analyze_vars(p_var, print_info=True)
d_var = [v for v in all_vars if v.op.name.startswith('Discriminator')]
log.warn("********* d_var ********** ")
slim.model_analyzer.analyze_vars(d_var, print_info=True)
# the whole model without the discriminator
g_var = p_var + f_var
self.f_optimizer = tf.train.AdamOptimizer(
self.learning_rate_f,
).minimize(self.model.flow_loss,
var_list=f_var, name='optimizer_flow_loss')
self.p_optimizer = tf.train.AdamOptimizer(
self.learning_rate_p,
).minimize(self.model.pixel_loss, global_step=self.global_step,
var_list=p_var, name='optimizer_pixel_loss')
self.p_optimizer_gan = tf.train.AdamOptimizer(
self.learning_rate_p,
beta1=0.5
).minimize(self.model.pixel_loss_gan, global_step=self.global_step,
var_list=p_var, name='optimizer_pixel_loss_gan')
self.d_optimizer = tf.train.AdamOptimizer(
self.learning_rate_d,
beta1=0.5
).minimize(self.model.d_loss, global_step=self.global_step,
var_list=d_var, name='optimizer_discriminator_loss')
self.train_summary_op = tf.summary.merge_all(key='train')
self.test_summary_op = tf.summary.merge_all(key='test')
self.saver = tf.train.Saver(max_to_keep=100)
self.pretrain_saver = tf.train.Saver(var_list=all_vars, max_to_keep=1)
self.pretrain_saver_p = tf.train.Saver(var_list=p_var, max_to_keep=1)
self.pretrain_saver_f = tf.train.Saver(var_list=f_var, max_to_keep=1)
self.pretrain_saver_g = tf.train.Saver(var_list=g_var, max_to_keep=1)
self.pretrain_saver_d = tf.train.Saver(var_list=d_var, max_to_keep=1)
self.summary_writer = tf.summary.FileWriter(self.train_dir)
self.max_steps = self.config.max_steps
self.ckpt_save_step = self.config.ckpt_save_step
self.log_step = self.config.log_step
self.test_sample_step = self.config.test_sample_step
self.write_summary_step = self.config.write_summary_step
self.gan_start_step = self.config.gan_start_step
self.checkpoint_secs = 600 # 10 min
self.supervisor = tf.train.Supervisor(
logdir=self.train_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
save_summaries_secs=300,
save_model_secs=self.checkpoint_secs,
global_step=self.global_step,
)
session_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
device_count={'GPU': 1},
)
self.session = self.supervisor.prepare_or_wait_for_session(config=session_config)
self.ckpt_path = config.checkpoint
if self.ckpt_path is not None:
log.info("Checkpoint path: %s", self.ckpt_path)
self.pretrain_saver.restore(self.session, self.ckpt_path, )
log.info("Loaded the pretrain parameters from the provided checkpoint path")
self.ckpt_path_f = config.checkpoint_f
if self.ckpt_path_f is not None:
log.info("Checkpoint path: %s", self.ckpt_path_f)
self.pretrain_saver_f.restore(self.session, self.ckpt_path_f)
log.info("Loaded the pretrain Flow module from the provided checkpoint path")
self.ckpt_path_p = config.checkpoint_p
if self.ckpt_path_p is not None:
log.info("Checkpoint path: %s", self.ckpt_path_p)
self.pretrain_saver_p.restore(self.session, self.ckpt_path_p)
log.info("Loaded the pretrain Pixel module from the provided checkpoint path")
self.ckpt_path_g = config.checkpoint_g
if self.ckpt_path_g is not None:
log.info("Checkpoint path: %s", self.ckpt_path_g)
self.pretrain_saver_g.restore(self.session, self.ckpt_path_g)
log.info("Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path")
self.ckpt_path_d = config.checkpoint_d
if self.ckpt_path_d is not None:
log.info("Checkpoint path: %s", self.ckpt_path_d)
self.pretrain_saver_d.restore(self.session, self.ckpt_path_d)
log.info("Loaded the pretrain Discriminator module from the provided checkpoint path")
def train(self):
log.infov("Training Starts!")
pprint(self.batch_train)
max_steps = self.max_steps
ckpt_save_step = self.ckpt_save_step
log_step = self.log_step
test_sample_step = self.test_sample_step
write_summary_step = self.write_summary_step
gan_start_step = self.gan_start_step
for s in xrange(max_steps):
# periodic inference
if s % test_sample_step == 0:
step, test_summary, p_loss, f_loss, loss, output, step_time = \
self.run_test(self.batch_test, step=s, is_train=False)
self.log_step_message(step, p_loss, f_loss, loss, step_time, is_train=False)
self.summary_writer.add_summary(test_summary, global_step=step)
step, train_summary, p_loss, f_loss, loss, output, step_time = \
self.run_single_step(self.batch_train, step=s,
opt_gan=s > gan_start_step, is_train=True)
if s % log_step == 0:
self.log_step_message(step, p_loss, f_loss, loss, step_time)
if s % write_summary_step == 0:
self.summary_writer.add_summary(train_summary, global_step=step)
if s % ckpt_save_step == 0:
log.infov("Saved checkpoint at %d", s)
save_path = self.saver.save(
self.session, os.path.join(self.train_dir, 'model'),
global_step=step)
def run_single_step(self, batch, step=None, opt_gan=False, is_train=True):
_start_time = time.time()
batch_chunk = self.session.run(batch)
fetch = [self.global_step, self.train_summary_op, self.model.output,
self.model.pixel_loss, self.model.flow_loss,
self.model.loss, self.check_op]
# fetch optimizers
if not opt_gan:
# optimize only l1 losses
fetch += [self.p_optimizer, self.f_optimizer]
else:
if step % (self.config.update_rate+1) > 0:
# train the generator
fetch += [self.p_optimizer_gan, self.f_optimizer]
else:
# train the discriminator
fetch += [self.d_optimizer]
fetch_values = self.session.run(
fetch,
feed_dict=self.model.get_feed_dict(batch_chunk, step=step)
)
[step, summary, output, p_loss, f_loss, loss] = fetch_values[:6]
_end_time = time.time()
return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)
def run_test(self, batch, step, is_train=False):
_start_time = time.time()
batch_chunk = self.session.run(batch)
step, summary, p_loss, f_loss, loss, output = self.session.run(
[self.global_step, self.test_summary_op,
self.model.pixel_loss, self.model.flow_loss,
self.model.loss, self.model.output],
feed_dict=self.model.get_feed_dict(batch_chunk, step=step, is_training=False)
)
_end_time = time.time()
return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)
def log_step_message(self, step, p_loss, f_loss, loss, step_time, is_train=True):
if step_time == 0: step_time = 0.001
log_fn = (is_train and log.info or log.infov)
log_fn((" [{split_mode:5s} step {step:4d}] " +
"Loss: {loss:.5f} " +
"Pixel loss: {p_loss:.5f} " +
"Flow loss: {f_loss:.5f} " +
"({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} instances/sec) "
).format(split_mode=(is_train and 'train' or 'val'),
step=step,
loss=loss,
p_loss=p_loss,
f_loss=f_loss,
sec_per_batch=step_time,
instance_per_sec=self.batch_size / step_time
)
)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8,
help='the mini-batch size')
parser.add_argument('--prefix', type=str, default='default',
help='a nickname for the training')
parser.add_argument('--dataset', type=str, default='car', choices=[
'car', 'chair', 'kitti', 'synthia'],
help='you can add your own dataset here')
parser.add_argument('--num_input', type=int, default=2,
help='the number of source images')
parser.add_argument('--checkpoint', type=str, default=None,
help='load all the parameters including the flow and '
'pixel modules and the discriminator')
parser.add_argument('--checkpoint_p', type=str, default=None,
help='load the parameters of the pixel module')
parser.add_argument('--checkpoint_f', type=str, default=None,
help='load the parameters of the flow module')
parser.add_argument('--checkpoint_g', type=str, default=None,
help='load the parameters of both the flow and pixel module')
parser.add_argument('--checkpoint_d', type=str, default=None,
help='load the parameters of the discriminator')
# Log
parser.add_argument('--log_step', type=int, default=10,
help='the frequency of outputing log info')
parser.add_argument('--ckpt_save_step', type=int, default=5000,
help='the frequency of saving a checkpoint')
parser.add_argument('--test_sample_step', type=int, default=100,
help='the frequency of performing testing inference during training')
parser.add_argument('--write_summary_step', type=int, default=100,
help='the frequency of writing TensorBoard summaries')
# Learning
parser.add_argument('--max_steps', type=int, default=10000000,
help='the max training iterations')
parser.add_argument('--learning_rate_p', type=float, default=5e-5,
help='the learning rate of the pixel module')
parser.add_argument('--learning_rate_f', type=float, default=1e-4,
help='the learning rate of the flow module')
parser.add_argument('--learning_rate_d', type=float, default=1e-4,
help='the learning rate of the discriminator')
parser.add_argument('--local_confidence_weight', type=int, default=1e-2,
help='the weight of the confidence prediction objective')
# Architecture
parser.add_argument('--num_res_block_pixel', type=int, default=0,
help='the number of residual block in the bottleneck of the pixel module')
parser.add_argument('--num_res_block_flow', type=int, default=4,
help='the number of residual block in the bottleneck of the flow module')
parser.add_argument('--num_dis_conv_layer', type=int, default=5,
help='the number of convolutional layers of the discriminator')
parser.add_argument('--num_conv_layer', type=int, default=5,
help='the number of convolutional layers of '
'the encoder of both the flow and pixel modules')
parser.add_argument('--num_convlstm_block', type=int, default=2,
help='the number of residual ConvLSTM block of the pixel module')
parser.add_argument('--num_convlstm_scale', type=int, default=3,
help='how many innermost layers of the pixel module '
'have a residual ConvLSTM connection')
parser.add_argument('--norm_type', type=str, default='None',
choices=['batch', 'instance', 'None'],
help='the type of normalization')
# GAN
parser.add_argument('--gan_type', type=str, default='ls', choices=['ls', 'normal'],
help='the type of GAN losses such as LS-GAN, WGAN, etc')
parser.add_argument('--gan_start_step', type=int, default=5e5,
help='start to optimize the GAN loss when the model is stable')
parser.add_argument('--update_rate', type=int, default=1,
help='update G more frequently than D')
# Multi-scale prediction: this is not reporeted in the paper
# The main idea is to imporve the flow module by training it to start from
# predict a coarser flow fields (similar to progressive learning GAN
# proposed by Karras et al. ICLR 2017)
parser.add_argument('--num_scale', type=int, default=1,
help='the number of multi-scale flow prediction '
'(1 means without multi-scale prediction)')
parser.add_argument('--moving_weight', type=str, default='uniform',
choices=['uniform', 'shift', 'step'],
help='gradually learn each scale from coarse to fine')
config = parser.parse_args()
if config.dataset == 'car':
import datasets.shapenet_car as dataset
elif config.dataset == 'chair':
import datasets.shapenet_chair as dataset
elif config.dataset == 'kitti':
import datasets.kitti as dataset
elif config.dataset == 'synthia':
import datasets.synthia as dataset
else:
raise ValueError(config.dataset)
if 'car' in config.dataset or 'chair' in config.dataset:
config.dataset_type = 'object'
else:
config.dataset_type = 'scene'
dataset_train, dataset_test = \
dataset.create_default_splits(config.num_input)
image, pose = dataset_train.get_data(dataset_train.ids[0])
config.data_info = np.concatenate([np.asarray(image.shape), np.asarray(pose.shape)])
trainer = Trainer(config, dataset_train, dataset_test)
log.warning("dataset: %s", config.dataset)
trainer.train()
if __name__ == '__main__':
main()
| [
"model.Model",
"datasets.synthia.create_default_splits",
"tensorflow.contrib.framework.get_or_create_global_step",
"six.moves.xrange",
"input_ops.create_input_ops",
"tensorflow.GPUOptions",
"pprint.pprint",
"util.log.warning",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.asarray",
"tens... | [((10788, 10813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10811, 10813), False, 'import argparse\n'), ((16457, 16504), 'datasets.synthia.create_default_splits', 'dataset.create_default_splits', (['config.num_input'], {}), '(config.num_input)\n', (16486, 16504), True, 'import datasets.synthia as dataset\n'), ((16723, 16765), 'util.log.warning', 'log.warning', (['"""dataset: %s"""', 'config.dataset'], {}), "('dataset: %s', config.dataset)\n", (16734, 16765), False, 'from util import log\n'), ((1065, 1107), 'util.log.infov', 'log.infov', (['"""Train Dir: %s"""', 'self.train_dir'], {}), "('Train Dir: %s', self.train_dir)\n", (1074, 1107), False, 'from util import log\n'), ((1212, 1272), 'input_ops.create_input_ops', 'create_input_ops', (['dataset', 'self.batch_size'], {'is_training': '(True)'}), '(dataset, self.batch_size, is_training=True)\n', (1228, 1272), False, 'from input_ops import create_input_ops\n'), ((1315, 1381), 'input_ops.create_input_ops', 'create_input_ops', (['dataset_test', 'self.batch_size'], {'is_training': '(False)'}), '(dataset_test, self.batch_size, is_training=False)\n', (1331, 1381), False, 'from input_ops import create_input_ops\n'), ((1448, 1461), 'model.Model', 'Model', (['config'], {}), '(config)\n', (1453, 1461), False, 'from model import Model\n'), ((1518, 1576), 'tensorflow.contrib.framework.get_or_create_global_step', 'tf.contrib.framework.get_or_create_global_step', ([], {'graph': 'None'}), '(graph=None)\n', (1564, 1576), True, 'import tensorflow as tf\n'), ((1764, 1774), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (1772, 1774), True, 'import tensorflow as tf\n'), ((1839, 1863), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1861, 1863), True, 'import tensorflow as tf\n'), ((1957, 1996), 'util.log.warn', 'log.warn', (['"""********* f_var ********** """'], {}), "('********* f_var ********** ')\n", (1965, 1996), False, 'from util import log\n'), ((2005, 2061), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['f_var'], {'print_info': '(True)'}), '(f_var, print_info=True)\n', (2037, 2061), True, 'import tensorflow.contrib.slim as slim\n'), ((2157, 2196), 'util.log.warn', 'log.warn', (['"""********* p_var ********** """'], {}), "('********* p_var ********** ')\n", (2165, 2196), False, 'from util import log\n'), ((2205, 2261), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['p_var'], {'print_info': '(True)'}), '(p_var, print_info=True)\n', (2237, 2261), True, 'import tensorflow.contrib.slim as slim\n'), ((2350, 2389), 'util.log.warn', 'log.warn', (['"""********* d_var ********** """'], {}), "('********* d_var ********** ')\n", (2358, 2389), False, 'from util import log\n'), ((2398, 2454), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['d_var'], {'print_info': '(True)'}), '(d_var, print_info=True)\n', (2430, 2454), True, 'import tensorflow.contrib.slim as slim\n'), ((3487, 3520), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train"""'}), "(key='train')\n", (3507, 3520), True, 'import tensorflow as tf\n'), ((3552, 3584), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""test"""'}), "(key='test')\n", (3572, 3584), True, 'import tensorflow as tf\n'), ((3607, 3638), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(100)'}), '(max_to_keep=100)\n', (3621, 3638), True, 'import tensorflow as tf\n'), ((3669, 3717), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'all_vars', 'max_to_keep': '(1)'}), '(var_list=all_vars, max_to_keep=1)\n', (3683, 3717), True, 'import tensorflow as tf\n'), ((3750, 3795), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'p_var', 'max_to_keep': '(1)'}), '(var_list=p_var, max_to_keep=1)\n', (3764, 3795), True, 'import tensorflow as tf\n'), ((3828, 3873), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'f_var', 'max_to_keep': '(1)'}), '(var_list=f_var, max_to_keep=1)\n', (3842, 3873), True, 'import tensorflow as tf\n'), ((3906, 3951), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'g_var', 'max_to_keep': '(1)'}), '(var_list=g_var, max_to_keep=1)\n', (3920, 3951), True, 'import tensorflow as tf\n'), ((3984, 4029), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'd_var', 'max_to_keep': '(1)'}), '(var_list=d_var, max_to_keep=1)\n', (3998, 4029), True, 'import tensorflow as tf\n'), ((4060, 4097), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.train_dir'], {}), '(self.train_dir)\n', (4081, 4097), True, 'import tensorflow as tf\n'), ((4503, 4730), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {'logdir': 'self.train_dir', 'is_chief': '(True)', 'saver': 'None', 'summary_op': 'None', 'summary_writer': 'self.summary_writer', 'save_summaries_secs': '(300)', 'save_model_secs': 'self.checkpoint_secs', 'global_step': 'self.global_step'}), '(logdir=self.train_dir, is_chief=True, saver=None,\n summary_op=None, summary_writer=self.summary_writer,\n save_summaries_secs=300, save_model_secs=self.checkpoint_secs,\n global_step=self.global_step)\n', (4522, 4730), True, 'import tensorflow as tf\n'), ((6724, 6753), 'util.log.infov', 'log.infov', (['"""Training Starts!"""'], {}), "('Training Starts!')\n", (6733, 6753), False, 'from util import log\n'), ((6762, 6786), 'pprint.pprint', 'pprint', (['self.batch_train'], {}), '(self.batch_train)\n', (6768, 6786), False, 'from pprint import pprint\n'), ((7066, 7083), 'six.moves.xrange', 'xrange', (['max_steps'], {}), '(max_steps)\n', (7072, 7083), False, 'from six.moves import xrange\n'), ((8300, 8311), 'time.time', 'time.time', ([], {}), '()\n', (8309, 8311), False, 'import time\n'), ((9209, 9220), 'time.time', 'time.time', ([], {}), '()\n', (9218, 9220), False, 'import time\n'), ((9384, 9395), 'time.time', 'time.time', ([], {}), '()\n', (9393, 9395), False, 'import time\n'), ((9798, 9809), 'time.time', 'time.time', ([], {}), '()\n', (9807, 9809), False, 'import time\n'), ((997, 1027), 'os.path.exists', 'os.path.exists', (['self.train_dir'], {}), '(self.train_dir)\n', (1011, 1027), False, 'import os\n'), ((1029, 1056), 'os.makedirs', 'os.makedirs', (['self.train_dir'], {}), '(self.train_dir)\n', (1040, 1056), False, 'import os\n'), ((5197, 5244), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path'], {}), "('Checkpoint path: %s', self.ckpt_path)\n", (5205, 5244), False, 'from util import log\n'), ((5329, 5405), 'util.log.info', 'log.info', (['"""Loaded the pretrain parameters from the provided checkpoint path"""'], {}), "('Loaded the pretrain parameters from the provided checkpoint path')\n", (5337, 5405), False, 'from util import log\n'), ((5507, 5556), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_f'], {}), "('Checkpoint path: %s', self.ckpt_path_f)\n", (5515, 5556), False, 'from util import log\n'), ((5643, 5720), 'util.log.info', 'log.info', (['"""Loaded the pretrain Flow module from the provided checkpoint path"""'], {}), "('Loaded the pretrain Flow module from the provided checkpoint path')\n", (5651, 5720), False, 'from util import log\n'), ((5822, 5871), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_p'], {}), "('Checkpoint path: %s', self.ckpt_path_p)\n", (5830, 5871), False, 'from util import log\n'), ((5958, 6036), 'util.log.info', 'log.info', (['"""Loaded the pretrain Pixel module from the provided checkpoint path"""'], {}), "('Loaded the pretrain Pixel module from the provided checkpoint path')\n", (5966, 6036), False, 'from util import log\n'), ((6138, 6187), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_g'], {}), "('Checkpoint path: %s', self.ckpt_path_g)\n", (6146, 6187), False, 'from util import log\n'), ((6274, 6379), 'util.log.info', 'log.info', (['"""Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path"""'], {}), "(\n 'Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path'\n )\n", (6282, 6379), False, 'from util import log\n'), ((6471, 6520), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_d'], {}), "('Checkpoint path: %s', self.ckpt_path_d)\n", (6479, 6520), False, 'from util import log\n'), ((6607, 6703), 'util.log.info', 'log.info', (['"""Loaded the pretrain Discriminator module from the provided checkpoint path"""'], {}), "(\n 'Loaded the pretrain Discriminator module from the provided checkpoint path'\n )\n", (6615, 6703), False, 'from util import log\n'), ((16608, 16631), 'numpy.asarray', 'np.asarray', (['image.shape'], {}), '(image.shape)\n', (16618, 16631), True, 'import numpy as np\n'), ((16633, 16655), 'numpy.asarray', 'np.asarray', (['pose.shape'], {}), '(pose.shape)\n', (16643, 16655), True, 'import numpy as np\n'), ((940, 970), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (953, 970), False, 'import time\n'), ((2566, 2610), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_f'], {}), '(self.learning_rate_f)\n', (2588, 2610), True, 'import tensorflow as tf\n'), ((2756, 2800), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_p'], {}), '(self.learning_rate_p)\n', (2778, 2800), True, 'import tensorflow as tf\n'), ((2982, 3037), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_p'], {'beta1': '(0.5)'}), '(self.learning_rate_p, beta1=0.5)\n', (3004, 3037), True, 'import tensorflow as tf\n'), ((3234, 3289), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_d'], {'beta1': '(0.5)'}), '(self.learning_rate_d, beta1=0.5)\n', (3256, 3289), True, 'import tensorflow as tf\n'), ((4931, 4963), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (4944, 4963), True, 'import tensorflow as tf\n'), ((8003, 8041), 'util.log.infov', 'log.infov', (['"""Saved checkpoint at %d"""', 's'], {}), "('Saved checkpoint at %d', s)\n", (8012, 8041), False, 'from util import log\n'), ((8121, 8158), 'os.path.join', 'os.path.join', (['self.train_dir', '"""model"""'], {}), "(self.train_dir, 'model')\n", (8133, 8158), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import fused_layer_norm_cuda
from apex.normalization import FusedLayerNorm
import pyprof
pyprof.init()
pyprof.wrap(fused_layer_norm_cuda, 'forward')
pyprof.wrap(fused_layer_norm_cuda, 'backward')
pyprof.wrap(fused_layer_norm_cuda, 'forward_affine')
pyprof.wrap(fused_layer_norm_cuda, 'backward_affine')
input = torch.randn(20, 5, 10, 10).cuda()
# With Learnable Parameters
m = FusedLayerNorm(input.size()[1:]).cuda()
output = m(input)
# Without Learnable Parameters
m = FusedLayerNorm(input.size()[1:], elementwise_affine=False).cuda()
output = m(input)
# Normalize over last two dimensions
m = FusedLayerNorm([10, 10]).cuda()
output = m(input)
# Normalize over last dimension of size 10
m = FusedLayerNorm(10).cuda()
output = m(input)
| [
"apex.normalization.FusedLayerNorm",
"pyprof.init",
"pyprof.wrap",
"torch.randn"
] | [((761, 774), 'pyprof.init', 'pyprof.init', ([], {}), '()\n', (772, 774), False, 'import pyprof\n'), ((775, 820), 'pyprof.wrap', 'pyprof.wrap', (['fused_layer_norm_cuda', '"""forward"""'], {}), "(fused_layer_norm_cuda, 'forward')\n", (786, 820), False, 'import pyprof\n'), ((821, 867), 'pyprof.wrap', 'pyprof.wrap', (['fused_layer_norm_cuda', '"""backward"""'], {}), "(fused_layer_norm_cuda, 'backward')\n", (832, 867), False, 'import pyprof\n'), ((868, 920), 'pyprof.wrap', 'pyprof.wrap', (['fused_layer_norm_cuda', '"""forward_affine"""'], {}), "(fused_layer_norm_cuda, 'forward_affine')\n", (879, 920), False, 'import pyprof\n'), ((921, 974), 'pyprof.wrap', 'pyprof.wrap', (['fused_layer_norm_cuda', '"""backward_affine"""'], {}), "(fused_layer_norm_cuda, 'backward_affine')\n", (932, 974), False, 'import pyprof\n'), ((984, 1010), 'torch.randn', 'torch.randn', (['(20)', '(5)', '(10)', '(10)'], {}), '(20, 5, 10, 10)\n', (995, 1010), False, 'import torch\n'), ((1271, 1295), 'apex.normalization.FusedLayerNorm', 'FusedLayerNorm', (['[10, 10]'], {}), '([10, 10])\n', (1285, 1295), False, 'from apex.normalization import FusedLayerNorm\n'), ((1369, 1387), 'apex.normalization.FusedLayerNorm', 'FusedLayerNorm', (['(10)'], {}), '(10)\n', (1383, 1387), False, 'from apex.normalization import FusedLayerNorm\n')] |
import unittest
def merge_sort(arr):
def _merge(left, right):
merged_list = []
i, j = 0, 0
while len(left) > i and len(right) > j:
if left[i] < right[j]:
merged_list.append(left[i])
i += 1
else:
merged_list.append(right[j])
j += 1
while len(left) > i:
merged_list.append(left[i])
i += 1
while len(right) > j:
merged_list.append(right[j])
j += 1
return merged_list
def _merge_sort(list):
if len(list) <= 1:
return list
mid = len(list) // 2
left = _merge_sort(list[:mid])
right = _merge_sort(list[mid:])
return _merge(left, right)
result = _merge_sort(arr)
return result
class TestMergeSort(unittest.TestCase):
def test(self):
arr = merge_sort([5, 3, 2, 1, 4])
self.assertEqual(
arr,
[1, 2, 3, 4, 5]
)
arr = merge_sort([-1, -10, 2, 4, 1])
self.assertEqual(
arr,
[-10, -1, 1, 2, 4]
)
arr = merge_sort([1, -1, 5, 2, 4, 2])
self.assertEqual(
arr,
[-1, 1, 2, 2, 4, 5]
)
arr = merge_sort([5, 2])
self.assertEqual(
arr,
[2, 5]
)
arr = merge_sort([-1, -1, 2, 5, 4, 2])
self.assertEqual(
arr,
[-1, -1, 2, 2, 4, 5]
)
if __name__ == '__main__':
unittest.TestCase()
| [
"unittest.TestCase"
] | [((1541, 1560), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (1558, 1560), False, 'import unittest\n')] |
# coding: utf-8
from __future__ import print_function, unicode_literals, absolute_import
import cv2
from .base import BaseTestCase, root
try:
cv2.setNumThreads(1)
except AttributeError:
print('!!! You are using OpenCV which does not allow you to set '
'the number of threads')
class Cv2TestCase(BaseTestCase):
filter_ids = {
cv2.INTER_AREA: 'sup',
cv2.INTER_NEAREST: 'ner',
cv2.INTER_LINEAR: 'bil',
cv2.INTER_CUBIC: 'bic',
cv2.INTER_LANCZOS4: 'lzs4',
}
def create_test_data(self):
im = cv2.imread(root('resources', 'color_circle.png'),
flags=cv2.IMREAD_UNCHANGED)
if self.mode == 'RGB':
im = im[:, :, :3]
elif self.mode == 'RGBA':
pass
elif self.mode == 'L':
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
else:
raise ValueError('Unknown mode: {}'.format(self.mode))
# Fine for upscaling
im = cv2.resize(im, tuple(self.size), interpolation=cv2.INTER_CUBIC)
return [im]
| [
"cv2.setNumThreads",
"cv2.cvtColor"
] | [((151, 171), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(1)'], {}), '(1)\n', (168, 171), False, 'import cv2\n'), ((834, 870), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (846, 870), False, 'import cv2\n')] |
"""Librerias Importadas"""
from flask import Flask
from flask import render_template
from flask import request
App=Flask(__name__)
@App.route('/')
def index():
"""Pagina Principal en donde se introduce el nombre, apellido, comision"""
return render_template('index.html')
@App.route('/porcentaje',methods=['POST'])
def porcentaje():
if request.method=='POST':
""":var file: es la variable que estoy utilizando para acceder al
archivo y copiar en el."""
file=open("archivo.csv","w")
""":var nombre: Donde se guarda el nombre obtenido en el html"""
nombre=request.form['nombre']
""":var apellido: Donde se guarda el apellido obtenido en el html"""
apellido=request.form['apellido']
""":var venta: la variable tipo venta se trae en tipo cadena
y se combierte con el float para poder manipularla"""
venta = float(request.form.get('venta'))
if (venta > 100000):
r = venta * 0.15
elif (venta > 75000):
r = venta * 0.10
elif (venta > 50000):
r = venta * 0.07
elif (venta > 25000):
r = venta * 0.05
else:
r = '¡Usted no ha realizado ventas en el Mes!'
"""Se esta escribiendo en el archivo csv"""
file.write(nombre)
file.write(",")
file.write(apellido)
file.write(",")
file.write(str(venta))
file.write(",")
file.write(str(r))
file.close()
""":return render_templates: es el return que se hace para mandar los valores
al html"""
return render_template('porcentaje.html',nom=nombre,ape=apellido,ven=venta,rr=r)
if __name__=="__main__":
App.run() | [
"flask.render_template",
"flask.request.form.get",
"flask.Flask"
] | [((118, 133), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (123, 133), False, 'from flask import Flask\n'), ((255, 284), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (270, 284), False, 'from flask import render_template\n'), ((1623, 1700), 'flask.render_template', 'render_template', (['"""porcentaje.html"""'], {'nom': 'nombre', 'ape': 'apellido', 'ven': 'venta', 'rr': 'r'}), "('porcentaje.html', nom=nombre, ape=apellido, ven=venta, rr=r)\n", (1638, 1700), False, 'from flask import render_template\n'), ((909, 934), 'flask.request.form.get', 'request.form.get', (['"""venta"""'], {}), "('venta')\n", (925, 934), False, 'from flask import request\n')] |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This library provides a random password generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import flags
from absl import logging
_MIN = 8
_MAX = 100
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'password_length', _MAX,
'The length of the password to be generated for the Grab n Go Role Account.'
'\nNOTE: The length must be between 8 and 100 and must be compliant with '
'the G Suite Admin password settings.\nThe Security Settings can be found '
'in the Google Admin console: admin.google.com'
)
flags.register_validator(
'password_length', lambda length: length >= _MIN and length <= _MAX,
'Password length must be between {} and {} characters.'.format(_MIN, _MAX),
)
def generate(length):
"""Generates a new password of a given length.
Args:
length: int, the length of the password to generate.
Returns:
A random password of type string with the given length.
Raises:
ValueError: if the length provided is invalid.
"""
if length < _MIN or length > _MAX:
raise ValueError(
'password length must be between {!r} and {!r} characters length '
'provided was: {!r}'.format(_MIN, _MAX, length))
logging.debug('Generating a password with length: %r.', length)
chars = (
'abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'!$%^&*()-_=+@:;~#,.<>? '
)
password = ''
rand = random.SystemRandom()
while len(password) < length:
password += rand.choice(chars)
return password
| [
"absl.flags.DEFINE_integer",
"random.SystemRandom",
"absl.logging.debug"
] | [((871, 1195), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""password_length"""', '_MAX', '"""The length of the password to be generated for the Grab n Go Role Account.\nNOTE: The length must be between 8 and 100 and must be compliant with the G Suite Admin password settings.\nThe Security Settings can be found in the Google Admin console: admin.google.com"""'], {}), '(\'password_length\', _MAX,\n """The length of the password to be generated for the Grab n Go Role Account.\nNOTE: The length must be between 8 and 100 and must be compliant with the G Suite Admin password settings.\nThe Security Settings can be found in the Google Admin console: admin.google.com"""\n )\n', (891, 1195), False, 'from absl import flags\n'), ((1871, 1934), 'absl.logging.debug', 'logging.debug', (['"""Generating a password with length: %r."""', 'length'], {}), "('Generating a password with length: %r.', length)\n", (1884, 1934), False, 'from absl import logging\n'), ((2098, 2119), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (2117, 2119), False, 'import random\n')] |
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
from commons.constants import IMAGES_DIR
from hamcrest import assert_that, is_not, is_, equal_to
from qautils.dataset.dataset_utils import DatasetUtils
from qautils.logger.logger_utils import get_logger
from commons.utils import get_real_value_of_image_property
import os
__copyright__ = "Copyright 2015-2016"
__license__ = " Apache License, Version 2.0"
__logger__ = get_logger("qautils")
def create_new_image(context, region_name, image_name, image_filename=None):
"""
HELPER: Create new image using given params and step context.
:param region_name (string): Name of the node where image will be created
:param context (Behave Context): Behave context
:param image_name (string): Name of the image
:param image_file (string): Filename to be used as image.
:return: None
"""
__logger__.info("Creating new image '%s' in region '%s'. Image filename: '%s'",
image_name, region_name, image_filename)
__dataset_utils__ = DatasetUtils()
# Get Glance Manager for the given region
glance_ops = context.glance_manager_list[region_name]
properties = dict()
if context.table is not None:
for row in __dataset_utils__.prepare_data(context.table):
if 'param_name' in row.headings:
real_value = get_real_value_of_image_property(glance_ops, row['param_value'])
value = real_value if real_value is not None else row['param_value']
properties.update({row['param_name']: value})
# Create new image (pubic=True by default)
is_public = True
if "is_public" in properties:
is_public = properties["is_public"]
properties.pop("is_public")
__logger__.debug("Is the image '%s' public?: '%s'", image_name, is_public)
__logger__.debug("Image properties: '%s'", properties)
# If filename is None, it will be the same as the image_name
image_filename = image_name if image_filename is None else image_filename
__logger__.debug("Image filename to use: '%s'", image_filename)
glance_ops.create_image(image_name, image_filename, custom_properties=properties, is_public=is_public)
context.created_images_list.append(image_name)
def image_is_present_in_nodes(context, region, image_name, filename_content=None, check_master=True):
"""
HELPER: Check if an image is present in the Glance of the given node (region)
:param context (Behave Context): Behave context
:param image_name (string): Name of the image
:param filename_content (string): Filename to be used as image.
:param check_master (bool): If True, check the image in the Glance of Master node.
:return: None
"""
# If region is Master and check_master is False, DO NOT CHECK the presence of the image
if region == context.master_region_name and check_master is False:
return
glance_ops = context.glance_manager_list[region]
images_list = glance_ops.get_images(image_name)
# There is only one image with that name (if region is not Master)
if region == context.master_region_name:
assert_that(len(images_list), is_not(equal_to(0)),
"There aren't images with the name '{}' in '{}' (master) and it should".format(image_name,
region))
else:
assert_that(len(images_list), is_(equal_to(1)),
"There are more/less than ONE image with the name '{}' in '{}'".format(image_name, region))
image = images_list[0]
# The name is the expected
assert_that(image.name, is_(equal_to(image_name)),
"The image name '{}' in '{}' is not the expected one".format(image_name, region))
# Check if the image data is the expected.
sync_img_content = glance_ops.get_data_as_string(image.id)
filename_content = image_name if filename_content is None else filename_content
expected_img_content = ""
current = os.getcwd()
if "tests/acceptance" in current:
image_path = "{}/{}".format(IMAGES_DIR, filename_content)
else:
image_path = "tests/acceptance/{}/{}".format(IMAGES_DIR, filename_content)
file = open(image_path)
for line in file:
expected_img_content += line
assert_that(sync_img_content, is_(equal_to(expected_img_content)),
"The image content for '{}' in '{}' is not the expected content".format(image_name, region))
def image_is_not_present_in_node(context, region, image_name):
"""
HELPER: Check if an image is NOT present in the Glance of the given node (region)
:param context (Behave Context): Behave context
:param region: Node name to check
:param image_name (string): Name of the image
:return: None
"""
glance_ops = context.glance_manager_list[region]
images_list = glance_ops.get_images(image_name)
# There must not be images with the given name
assert_that(len(images_list), is_(equal_to(0)),
"There are images with the name '{}' in '{}', and it sloudn't".format(image_name, region))
| [
"commons.utils.get_real_value_of_image_property",
"os.getcwd",
"qautils.dataset.dataset_utils.DatasetUtils",
"qautils.logger.logger_utils.get_logger",
"hamcrest.equal_to"
] | [((1148, 1169), 'qautils.logger.logger_utils.get_logger', 'get_logger', (['"""qautils"""'], {}), "('qautils')\n", (1158, 1169), False, 'from qautils.logger.logger_utils import get_logger\n'), ((1762, 1776), 'qautils.dataset.dataset_utils.DatasetUtils', 'DatasetUtils', ([], {}), '()\n', (1774, 1776), False, 'from qautils.dataset.dataset_utils import DatasetUtils\n'), ((4773, 4784), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4782, 4784), False, 'import os\n'), ((4411, 4431), 'hamcrest.equal_to', 'equal_to', (['image_name'], {}), '(image_name)\n', (4419, 4431), False, 'from hamcrest import assert_that, is_not, is_, equal_to\n'), ((5110, 5140), 'hamcrest.equal_to', 'equal_to', (['expected_img_content'], {}), '(expected_img_content)\n', (5118, 5140), False, 'from hamcrest import assert_that, is_not, is_, equal_to\n'), ((5773, 5784), 'hamcrest.equal_to', 'equal_to', (['(0)'], {}), '(0)\n', (5781, 5784), False, 'from hamcrest import assert_that, is_not, is_, equal_to\n'), ((2080, 2144), 'commons.utils.get_real_value_of_image_property', 'get_real_value_of_image_property', (['glance_ops', "row['param_value']"], {}), "(glance_ops, row['param_value'])\n", (2112, 2144), False, 'from commons.utils import get_real_value_of_image_property\n'), ((3909, 3920), 'hamcrest.equal_to', 'equal_to', (['(0)'], {}), '(0)\n', (3917, 3920), False, 'from hamcrest import assert_that, is_not, is_, equal_to\n'), ((4194, 4205), 'hamcrest.equal_to', 'equal_to', (['(1)'], {}), '(1)\n', (4202, 4205), False, 'from hamcrest import assert_that, is_not, is_, equal_to\n')] |
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
import store_api.urls
import store_ui.urls
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(store_api.urls.urlpatterns)),
url(r'^.*$', include(store_ui.urls.urlpatterns))
]
| [
"django.conf.urls.include"
] | [((203, 227), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (210, 227), False, 'from django.conf.urls import include, url\n'), ((248, 283), 'django.conf.urls.include', 'include', (['store_api.urls.urlpatterns'], {}), '(store_api.urls.urlpatterns)\n', (255, 283), False, 'from django.conf.urls import include, url\n'), ((303, 337), 'django.conf.urls.include', 'include', (['store_ui.urls.urlpatterns'], {}), '(store_ui.urls.urlpatterns)\n', (310, 337), False, 'from django.conf.urls import include, url\n')] |
import pp
def test_mzi():
netlist = """
instances:
CP1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
arm_bot:
component: mzi_arm
placements:
arm_bot:
mirror: [0,0,0,10]
ports:
W0: CP1,W0
E0: CP2,W0
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
return pp.component_from_yaml(netlist)
if __name__ == "__main__":
c = test_mzi()
pp.show(c)
pp.plotgds(c)
| [
"pp.component_from_yaml",
"pp.plotgds",
"pp.show"
] | [((663, 694), 'pp.component_from_yaml', 'pp.component_from_yaml', (['netlist'], {}), '(netlist)\n', (685, 694), False, 'import pp\n'), ((747, 757), 'pp.show', 'pp.show', (['c'], {}), '(c)\n', (754, 757), False, 'import pp\n'), ((762, 775), 'pp.plotgds', 'pp.plotgds', (['c'], {}), '(c)\n', (772, 775), False, 'import pp\n')] |
import numpy as np
import scipy.linalg as la
from scipy.stats import multinomial
def random_multivar_normal(n, d, k, sigma=.1):
'''
Generate random samples from a random multivariate normal distribution
with covariance A A^T + sigma^2 I.
Input:
n: int, number of samples
d: int, dimension of samples
k: int, number of samples approximated
sigma: optional float > 0, default .1, the standard deviation
of the sample noise.
Output:
cov: d x d array, the covariance matrix for the distibution
A0: d x d array, the eigenvectors we want to estimate (note the
eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples.
'''
A0 = la.qr(np.random.rand(d, k), mode='economic')[0]
cov = A0 @ A0.T + sigma**2 * np.eye(d)
X = np.random.multivariate_normal(np.zeros(d), cov, size=n)
return cov, A0, X
def spiked_covariance(n, d, k, sigma=.1):
'''
Generate random samples from a random multivariate normal distribution
with covariance A D A^T + sigma^2 I.
Here A is a set of k orthogonal vectors and D is a diagonal matrix with
random, uniform entries, sorted and scaled so that the first entry = 1.
Input:
n: int, number of samples
d: int, dimension of samples
k: int, number of samples approximated
sigma: optional float > 0, default .1, the standard deviation
of the sample noise.
Output:
cov: d x d array, the true covariance matrix for the distibution
w: d vector of the diagonal values from matrix D.
A0: d x k array, the eigenvectors we want to estimate (note the
eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples.
'''
A0 = la.qr(np.random.rand(d, k), mode='economic')[0]
w = np.sort(np.random.rand(k, 1), axis=0)[::-1]
w /= w.max()
cov = A0 @ (w**2 * A0.T) + sigma**2 * np.eye(d)
X = np.random.multivariate_normal(np.zeros(d), cov, size=n)
return cov, w, A0, X
def random_multinomial(n, d, trials=100, mean0 = True, scale=1):
'''
Generate random samples from a random multinomial distribution with p_i ~ U(0,1).
Input:
n: int, number of samples
d: int, dimension of samples
trials: optional int, the number of trials for each sample from the
multinomial distribution default is 100.
mean0: optional boolean, default True. Indicates whether to normalize
the samples so they are mean 0.
Output:
cov: d x d array, the true covariance matrix for the distribution
e: d-dimensional array, the eigenvalues of the covariance matrix
v: d x d array, the eigenvectors of the covariance matrix
X: n x d array of n d-dimensional samples from the random_dirichlet
distribution with covariance cov.
'''
# Initialize p values
p = np.random.rand(d)
p /= p.sum()
# Calculate the covariance matrix for the multinomial distribution
# For large d > 10000, use multinomial.cov(d,p)
if d >= 10000:
cov = multinomial.cov(trials, p)
else:
cov = -np.outer(p, p) * trials
cov[np.diag_indices(d)] = trials * p * (1-p)
cov *= scale**2
# Obtain the eigenvectors of the covariance matrix.
e, v = la.eigh(cov)
e = e[::-1]
v = v[:,::-1]
# Obtain a sample from the multinomial distribution of size n
X = np.random.multinomial(trials, p, n).astype(float)
if mean0:
# Let X have mean 0
X -= trials * p
X *= scale
return cov, e, v, X
def random_dirichlet(n, d, mean0=True, scale=1):
'''
Generate random samples from a random dirichlet distribution with a_i ~ U(0,1).
Input:
n: int, number of samples
d: int, dimension of samples
mean0: optional boolean, default True. Indicates whether to normalize
the samples so they are mean 0.
Output:
cov: d x d array, the true covariance matrix for the distribution
e: d-dimensional array, the eigenvalues of the covariance matrix
v: d x d array, the eigenvectors of the covariance matrix
(note the eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples from the random_dirichlet
distribution with covariance cov.
'''
# Initialize a random set of parameters a drawn from the
# uniform distribution.
a = np.random.rand(d)
a0 = a.sum()
a_denom = a0**2 * (a0 + 1)
# Obtain the covariance matrix for the dirichlet distribution.
# Note that scipy doesn't currently have a builtin method for this
# (I may add one myself)
cov = -np.outer(a, a) / a_denom # i neq j case
cov[np.diag_indices(d)] = a * (a0 - a) / a_denom # i = j case
cov *= scale**2
# Obtain the eigenvectors of the covariance matrix.
e, v = la.eigh(cov)
e = e[::-1]
v = v[:,::-1]
X = np.random.dirichlet(a, n)
if mean0:
X -= a / a0
X *= scale
return cov, e, v, X
| [
"scipy.linalg.eigh",
"numpy.eye",
"numpy.random.rand",
"numpy.diag_indices",
"scipy.stats.multinomial.cov",
"numpy.random.multinomial",
"numpy.random.dirichlet",
"numpy.zeros",
"numpy.outer"
] | [((2998, 3015), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (3012, 3015), True, 'import numpy as np\n'), ((3408, 3420), 'scipy.linalg.eigh', 'la.eigh', (['cov'], {}), '(cov)\n', (3415, 3420), True, 'import scipy.linalg as la\n'), ((4563, 4580), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (4577, 4580), True, 'import numpy as np\n'), ((5002, 5014), 'scipy.linalg.eigh', 'la.eigh', (['cov'], {}), '(cov)\n', (5009, 5014), True, 'import scipy.linalg as la\n'), ((5058, 5083), 'numpy.random.dirichlet', 'np.random.dirichlet', (['a', 'n'], {}), '(a, n)\n', (5077, 5083), True, 'import numpy as np\n'), ((905, 916), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (913, 916), True, 'import numpy as np\n'), ((2068, 2079), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (2076, 2079), True, 'import numpy as np\n'), ((3190, 3216), 'scipy.stats.multinomial.cov', 'multinomial.cov', (['trials', 'p'], {}), '(trials, p)\n', (3205, 3216), False, 'from scipy.stats import multinomial\n'), ((4856, 4874), 'numpy.diag_indices', 'np.diag_indices', (['d'], {}), '(d)\n', (4871, 4874), True, 'import numpy as np\n'), ((782, 802), 'numpy.random.rand', 'np.random.rand', (['d', 'k'], {}), '(d, k)\n', (796, 802), True, 'import numpy as np\n'), ((857, 866), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (863, 866), True, 'import numpy as np\n'), ((1866, 1886), 'numpy.random.rand', 'np.random.rand', (['d', 'k'], {}), '(d, k)\n', (1880, 1886), True, 'import numpy as np\n'), ((1924, 1944), 'numpy.random.rand', 'np.random.rand', (['k', '(1)'], {}), '(k, 1)\n', (1938, 1944), True, 'import numpy as np\n'), ((2020, 2029), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (2026, 2029), True, 'import numpy as np\n'), ((3278, 3296), 'numpy.diag_indices', 'np.diag_indices', (['d'], {}), '(d)\n', (3293, 3296), True, 'import numpy as np\n'), ((3530, 3565), 'numpy.random.multinomial', 'np.random.multinomial', (['trials', 'p', 'n'], {}), '(trials, p, n)\n', (3551, 3565), True, 'import numpy as np\n'), ((4808, 4822), 'numpy.outer', 'np.outer', (['a', 'a'], {}), '(a, a)\n', (4816, 4822), True, 'import numpy as np\n'), ((3242, 3256), 'numpy.outer', 'np.outer', (['p', 'p'], {}), '(p, p)\n', (3250, 3256), True, 'import numpy as np\n')] |
"""livestream API tests"""
from livestream.api import get_upcoming_events
def test_get_upcoming_events(settings, mocker):
"""test get upcoming events"""
settings.LIVESTREAM_ACCOUNT_ID = 392_239
settings.LIVESTREAM_SECRET_KEY = "secret key"
requests_patch = mocker.patch("requests.get", autospec=True)
resp = get_upcoming_events()
requests_patch.assert_called_once_with(
f"https://livestreamapis.com/v3/accounts/{settings.LIVESTREAM_ACCOUNT_ID}/upcoming_events",
auth=(settings.LIVESTREAM_SECRET_KEY, ""),
)
assert resp == requests_patch.return_value
| [
"livestream.api.get_upcoming_events"
] | [((330, 351), 'livestream.api.get_upcoming_events', 'get_upcoming_events', ([], {}), '()\n', (349, 351), False, 'from livestream.api import get_upcoming_events\n')] |
'''
Created on 15.2.2017
@author: sapejura
'''
import io
import threading
import struct
class FrameQueue(io.IOBase):
def __init__(self, frame_size):
super().__init__()
self.queue_lock = threading.Lock()
self._queue = bytearray()
self._store_mode = 0
self.frame_size = frame_size
# self.remaining = bytearray()
# self.memview = memoryview(self.remaining)
def readable(self):
return True
def writable(self):
return True
def put(self, b):
self._queue.extend(b)
return len(b)
def get(self, n=-1):
'''
Depending on the FrameQueue mode this method returns
different bytearray.
mode 1:
makes the queue FIFO and returns n bytes from it.
n=-1 returns everything.
mode 2:
makes the queue LILO and returns the last full
'frame_size' bytes frame. Otherwise returns b''.
'''
if self._store_mode == 1:
if n < 0:
b = self._queue[:]
del self._queue[:]
else:
b = self._queue[:n]
del self._queue[:n]
elif self._store_mode == 2:
# Returns the last full frame and
# removes it and everything come before that
l = len(self._queue)
frames = int(l/self.frame_size)
start = max((frames-1)*self.frame_size, 0)
end = start + self.frame_size
if l >= end-start:
b = self._queue[start:end]
del self._queue[:end]
else:
b = b''
else:
b = b''
return b
def set_mode(self, byte):
m = struct.unpack('B', byte)[0]
if m in [0, 1, 2]:
self._store_mode = m
else:
raise Exception('Illegal mode parameter.')
def buffer_size(self):
return len(self._queue)
def is_empty(self):
size = len(self._queue)
return size == 0
def clear_queue(self):
self._queue = bytearray()
| [
"threading.Lock",
"struct.unpack"
] | [((210, 226), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (224, 226), False, 'import threading\n'), ((1744, 1768), 'struct.unpack', 'struct.unpack', (['"""B"""', 'byte'], {}), "('B', byte)\n", (1757, 1768), False, 'import struct\n')] |
#!/usr/bin/python
import numpy as np
import os
import pymaster as nmt
import pytest
import tjpcov.main as cv
from tjpcov.parser import parse
import yaml
import sacc
root = "./tests/benchmarks/32_DES_tjpcov_bm/"
input_yml = os.path.join(root, "tjpcov_conf_minimal.yaml")
input_yml_no_nmtc = os.path.join(root, "tjpcov_conf_minimal_no_nmtconf.yaml")
xcell_yml = os.path.join(root, "desy1_tjpcov_bm.yml")
def get_xcell_yml():
with open(xcell_yml) as f:
config = yaml.safe_load(f)
return config
def get_nmt_bin():
bpw_edges = [0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96]
return nmt.NmtBin.from_edges(bpw_edges[:-1], bpw_edges[1:])
def get_pair_folder_name(tracer_comb):
bn = []
for tr in tracer_comb:
bn.append(tr.split('__')[0])
return '_'.join(bn)
def get_data_cl(tr1, tr2, remove_be=False):
bn = get_pair_folder_name((tr1, tr2))
fname = os.path.join(root, bn, f"cl_{tr1}_{tr2}.npz")
cl = np.load(fname)['cl']
# Remove redundant terms
if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
cl = np.delete(cl, 2, 0)
return cl
def get_fiducial_cl(s, tr1, tr2, binned=True, remove_be=False):
bn = get_pair_folder_name((tr1, tr2))
fname = os.path.join(root, 'fiducial', bn, f"cl_{tr1}_{tr2}.npz")
cl = np.load(fname)['cl']
if binned:
s = s.copy()
s.remove_selection(data_type='cl_0b')
s.remove_selection(data_type='cl_eb')
s.remove_selection(data_type='cl_be')
s.remove_selection(data_type='cl_bb')
ix = s.indices(tracers=(tr1, tr2))
bpw = s.get_bandpower_windows(ix)
cl0_bin = bpw.weight.T.dot(cl[0])
cl_bin = np.zeros((cl.shape[0], cl0_bin.size))
cl_bin[0] = cl0_bin
cl = cl_bin
else:
cl
# Remove redundant terms
if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
cl = np.delete(cl, 2, 0)
return cl
def get_tracer_noise(tr, cp=True):
bn = get_pair_folder_name((tr, tr))
fname = os.path.join(root, bn, f"cl_{tr}_{tr}.npz")
clfile = np.load(fname)
if cp:
return clfile['nl_cp'][0, -1]
else:
return clfile['nl'][0, 0]
def get_benchmark_cov(tracer_comb1, tracer_comb2):
(tr1, tr2), (tr3, tr4) = tracer_comb1, tracer_comb2
fname = os.path.join(root, 'cov', f'cov_{tr1}_{tr2}_{tr3}_{tr4}.npz')
return np.load(fname)['cov']
def get_workspace(tr1, tr2):
config = get_xcell_yml()
w = nmt.NmtWorkspace()
bn = get_pair_folder_name((tr1, tr2))
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
fname = os.path.join(root, bn, f"w__{m1}__{m2}.fits")
w.read_from(fname)
return w
def get_covariance_workspace(tr1, tr2, tr3, tr4):
config = get_xcell_yml()
cw = nmt.NmtCovarianceWorkspace()
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
m3 = config['tracers'][tr3]['mask_name']
m4 = config['tracers'][tr4]['mask_name']
fname = os.path.join(root, 'cov', f"cw__{m1}__{m2}__{m3}__{m4}.fits")
cw.read_from(fname)
return cw
def assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, threshold):
cl1 = get_data_cl(*tracer_comb1, remove_be=True)
cl2 = get_data_cl(*tracer_comb2, remove_be=True)
clf1 = get_fiducial_cl(s, *tracer_comb1, remove_be=True)
clf2 = get_fiducial_cl(s, *tracer_comb2, remove_be=True)
ndim, nbpw = cl1.shape
# This only runs if tracer_comb1 = tracer_comb2 (when the block covariance
# is invertible)
if (tracer_comb1[0] == tracer_comb1[1]) and (ndim == 3):
cov = cov.reshape((nbpw, 4, nbpw, 4))
cov = np.delete(np.delete(cov, 2, 1), 2, 3).reshape(3 * nbpw, -1)
cov_bm = cov_bm.reshape((nbpw, 4, nbpw, 4))
cov_bm = np.delete(np.delete(cov_bm, 2, 1), 2, 3).reshape(3 * nbpw, -1)
delta1 = (clf1 - cl1).flatten()
delta2 = (clf2 - cl2).flatten()
chi2 = delta1.dot(np.linalg.inv(cov)).dot(delta2)
chi2_bm = delta1.dot(np.linalg.inv(cov_bm)).dot(delta2)
assert np.abs(chi2 / chi2_bm - 1) < threshold
def test_nmt_conf_missing():
"""
Check that input file might not have nmt_conf and it still works
"""
tjpcov_class = cv.CovarianceCalculator(input_yml_no_nmtc)
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
tracer_comb1 = tracer_comb2 = ('DESgc__0', 'DESgc__0')
cache = {'bins': get_nmt_bin()}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_noise,
cache=cache)['final'] + 1e-100
@pytest.mark.parametrize('tracer_comb1,tracer_comb2',
[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')),
(('DESgc__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESgc__0', 'DESgc__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1')),
])
def test_nmt_gaussian_cov(tracer_comb1, tracer_comb2):
# tjpcov_class = cv.CovarianceCalculator(input_yml)
# cache = {'bins': get_nmt_bin()}
config, _= parse(input_yml)
bins = get_nmt_bin()
config['tjpcov']['binning_info'] = bins
tjpcov_class = cv.CovarianceCalculator(config)
cache = None
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
for tr in tracer_comb1 + tracer_comb2:
tracer_noise[tr] = get_tracer_noise(tr)
# Test error with uncoupled and coupled noise provided
with pytest.raises(ValueError):
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise=tracer_noise,
tracer_Noise_coupled=tracer_noise,
cache=cache)['final']
# Cov with coupled noise (as in benchmark)
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
cov_bm = get_benchmark_cov(tracer_comb1, tracer_comb2) + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-5
# Test error with 'bins' in cache different to that at initialization
with pytest.raises(ValueError):
cache2 = {'bins': nmt.NmtBin.from_nside_linear(32, bins.get_n_bands())}
cov2 = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise=tracer_noise,
tracer_Noise_coupled=tracer_noise,
cache=cache2)['final']
# Test it runs with 'bins' in cache if they are the same
cache2 = {'bins': bins}
cov2 = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache2)['final'] + 1e-100
assert np.all(cov == cov2)
# Cov with uncoupled noise cannot be used for benchmark as tracer_noise is
# assumed to be flat but it is not when computed from the coupled due to
# edge effects
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-5)
# Check that it runs if one of the masks does not overlap with the others
if tracer_comb1 != tracer_comb2:
os.system("rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*")
tjpcov_class.mask_fn[tracer_comb1[0]] = \
'./tests/benchmarks/32_DES_tjpcov_bm/catalogs/mask_nonoverlapping.fits.gz'
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache)
os.system("rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*")
@pytest.mark.parametrize('tracer_comb1,tracer_comb2',
[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')),
(('DESgc__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESgc__0', 'DESgc__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1')),
])
def test_nmt_gaussian_cov_cache(tracer_comb1, tracer_comb2):
tjpcov_class = cv.CovarianceCalculator(input_yml)
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
for tr in tracer_comb1 + tracer_comb2:
tracer_noise[tr] = get_tracer_noise(tr)
(tr1, tr2), (tr3, tr4) = tracer_comb1, tracer_comb2
s = None # Not needed if binned=False
cl13 = get_fiducial_cl(s, tr1, tr3, binned=False)
cl24 = get_fiducial_cl(s, tr2, tr4, binned=False)
cl14 = get_fiducial_cl(s, tr1, tr4, binned=False)
cl23 = get_fiducial_cl(s, tr2, tr3, binned=False)
cache = {
# 'f1': f1, 'f2': f2, 'f3': f3, 'f4': f4,
# 'm1': m1, 'm2': m2, 'm3': m3, 'm4': m4,
# 'w13': w13, 'w23': w23, 'w14': w14, 'w24': w24,
# 'w12': w12, 'w34': w34,
# 'cw': cw,
'cl13': cl13, 'cl24': cl24, 'cl14': cl14, 'cl23':cl23,
# 'SN13': SN13, 'SN24': SN24, 'SN14': SN14, 'SN23': SN23,
'bins': get_nmt_bin()
}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
cov_bm = get_benchmark_cov(tracer_comb1, tracer_comb2) + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-5
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-5)
w13 = get_workspace(tr1, tr3)
w23 = get_workspace(tr2, tr3)
w14 = get_workspace(tr1, tr4)
w24 = get_workspace(tr2, tr4)
w12 = get_workspace(tr1, tr2)
w34 = get_workspace(tr3, tr4)
cw = get_covariance_workspace(*tracer_comb1, *tracer_comb2)
cache = {
# 'f1': f1, 'f2': f2, 'f3': f3, 'f4': f4,
# 'm1': m1, 'm2': m2, 'm3': m3, 'm4': m4,
'w13': w13, 'w23': w23, 'w14': w14, 'w24': w24,
'w12': w12, 'w34': w34,
'cw': cw,
'cl13': cl13, 'cl24': cl24, 'cl14': cl14, 'cl23':cl23,
# 'SN13': SN13, 'SN24': SN24, 'SN14': SN14, 'SN23': SN23,
'bins': get_nmt_bin()
}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-6
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-6
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-6)
def test_get_all_cov_nmt():
tjpcov_class = cv.CovarianceCalculator(input_yml)
s = tjpcov_class.cl_data
bins = get_nmt_bin()
tracer_noise = {}
for tr in s.tracers:
tracer_noise[tr] = get_tracer_noise(tr)
# Test error with uncoupled and coupled noise provided
with pytest.raises(ValueError):
cov = tjpcov_class.get_all_cov_nmt(tracer_noise=tracer_noise,
tracer_noise_coupled=tracer_noise,
cache={'bins': bins})
cov = tjpcov_class.get_all_cov_nmt(tracer_noise_coupled=tracer_noise,
cache={'bins': bins}) + 1e-100
cov_bm = s.covariance.covmat + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-3
# Check chi2
clf = np.array([])
for trs in s.get_tracer_combinations():
cl_trs = get_fiducial_cl(s, *trs, remove_be=True)
clf = np.concatenate((clf, cl_trs.flatten()))
cl = s.mean
delta = clf - cl
chi2 = delta.dot(np.linalg.inv(cov)).dot(delta)
chi2_bm = delta.dot(np.linalg.inv(cov_bm)).dot(delta)
assert np.abs(chi2 / chi2_bm - 1) < 1e-5
# Check that it also works if they don't use concise data_types
s2 = s.copy()
for dp in s2.data:
dt = dp.data_type
if dt == 'cl_00':
dp.data_type = sacc.standard_types.galaxy_density_cl
elif dt == 'cl_0e':
dp.data_type = sacc.standard_types.galaxy_shearDensity_cl_e
elif dt == 'cl_0b':
dp.data_type = sacc.standard_types.galaxy_shearDensity_cl_b
elif dt == 'cl_ee':
dp.data_type = sacc.standard_types.galaxy_shear_cl_ee
elif dt == 'cl_eb':
dp.data_type = sacc.standard_types.galaxy_shear_cl_eb
elif dt == 'cl_be':
dp.data_type = sacc.standard_types.galaxy_shear_cl_be
elif dt == 'cl_bb':
dp.data_type = sacc.standard_types.galaxy_shear_cl_bb
else:
raise ValueError('Something went wrong. Data type not recognized')
tjpcov_class.cl_data = s2
cov2 = tjpcov_class.get_all_cov_nmt(tracer_noise_coupled=tracer_noise,
cache={'bins': bins}) + 1e-100
assert np.all(cov == cov2)
# Clean up after the tests
os.system("rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/")
| [
"pymaster.NmtWorkspace",
"numpy.abs",
"pymaster.NmtCovarianceWorkspace",
"numpy.delete",
"os.path.join",
"tjpcov.parser.parse",
"numpy.diag",
"pytest.mark.parametrize",
"numpy.array",
"yaml.safe_load",
"numpy.zeros",
"pytest.raises",
"numpy.linalg.inv",
"os.system",
"numpy.all",
"numpy... | [((225, 271), 'os.path.join', 'os.path.join', (['root', '"""tjpcov_conf_minimal.yaml"""'], {}), "(root, 'tjpcov_conf_minimal.yaml')\n", (237, 271), False, 'import os\n'), ((292, 349), 'os.path.join', 'os.path.join', (['root', '"""tjpcov_conf_minimal_no_nmtconf.yaml"""'], {}), "(root, 'tjpcov_conf_minimal_no_nmtconf.yaml')\n", (304, 349), False, 'import os\n'), ((362, 403), 'os.path.join', 'os.path.join', (['root', '"""desy1_tjpcov_bm.yml"""'], {}), "(root, 'desy1_tjpcov_bm.yml')\n", (374, 403), False, 'import os\n'), ((4695, 5037), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tracer_comb1,tracer_comb2"""', "[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0',\n 'DESwl__0'), ('DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))]"], {}), "('tracer_comb1,tracer_comb2', [(('DESgc__0',\n 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0', 'DESwl__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')), ((\n 'DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))])\n", (4718, 5037), False, 'import pytest\n'), ((8540, 8882), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tracer_comb1,tracer_comb2"""', "[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0',\n 'DESwl__0'), ('DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))]"], {}), "('tracer_comb1,tracer_comb2', [(('DESgc__0',\n 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0', 'DESwl__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')), ((\n 'DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))])\n", (8563, 8882), False, 'import pytest\n'), ((14178, 14245), 'os.system', 'os.system', (['"""rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/"""'], {}), "('rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/')\n", (14187, 14245), False, 'import os\n'), ((627, 679), 'pymaster.NmtBin.from_edges', 'nmt.NmtBin.from_edges', (['bpw_edges[:-1]', 'bpw_edges[1:]'], {}), '(bpw_edges[:-1], bpw_edges[1:])\n', (648, 679), True, 'import pymaster as nmt\n'), ((921, 966), 'os.path.join', 'os.path.join', (['root', 'bn', 'f"""cl_{tr1}_{tr2}.npz"""'], {}), "(root, bn, f'cl_{tr1}_{tr2}.npz')\n", (933, 966), False, 'import os\n'), ((1252, 1309), 'os.path.join', 'os.path.join', (['root', '"""fiducial"""', 'bn', 'f"""cl_{tr1}_{tr2}.npz"""'], {}), "(root, 'fiducial', bn, f'cl_{tr1}_{tr2}.npz')\n", (1264, 1309), False, 'import os\n'), ((2037, 2080), 'os.path.join', 'os.path.join', (['root', 'bn', 'f"""cl_{tr}_{tr}.npz"""'], {}), "(root, bn, f'cl_{tr}_{tr}.npz')\n", (2049, 2080), False, 'import os\n'), ((2094, 2108), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2101, 2108), True, 'import numpy as np\n'), ((2323, 2384), 'os.path.join', 'os.path.join', (['root', '"""cov"""', 'f"""cov_{tr1}_{tr2}_{tr3}_{tr4}.npz"""'], {}), "(root, 'cov', f'cov_{tr1}_{tr2}_{tr3}_{tr4}.npz')\n", (2335, 2384), False, 'import os\n'), ((2486, 2504), 'pymaster.NmtWorkspace', 'nmt.NmtWorkspace', ([], {}), '()\n', (2502, 2504), True, 'import pymaster as nmt\n'), ((2649, 2694), 'os.path.join', 'os.path.join', (['root', 'bn', 'f"""w__{m1}__{m2}.fits"""'], {}), "(root, bn, f'w__{m1}__{m2}.fits')\n", (2661, 2694), False, 'import os\n'), ((2821, 2849), 'pymaster.NmtCovarianceWorkspace', 'nmt.NmtCovarianceWorkspace', ([], {}), '()\n', (2847, 2849), True, 'import pymaster as nmt\n'), ((3042, 3103), 'os.path.join', 'os.path.join', (['root', '"""cov"""', 'f"""cw__{m1}__{m2}__{m3}__{m4}.fits"""'], {}), "(root, 'cov', f'cw__{m1}__{m2}__{m3}__{m4}.fits')\n", (3054, 3103), False, 'import os\n'), ((4260, 4302), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['input_yml_no_nmtc'], {}), '(input_yml_no_nmtc)\n', (4283, 4302), True, 'import tjpcov.main as cv\n'), ((5342, 5358), 'tjpcov.parser.parse', 'parse', (['input_yml'], {}), '(input_yml)\n', (5347, 5358), False, 'from tjpcov.parser import parse\n'), ((5447, 5478), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['config'], {}), '(config)\n', (5470, 5478), True, 'import tjpcov.main as cv\n'), ((7534, 7553), 'numpy.all', 'np.all', (['(cov == cov2)'], {}), '(cov == cov2)\n', (7540, 7553), True, 'import numpy as np\n'), ((9102, 9136), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['input_yml'], {}), '(input_yml)\n', (9125, 9136), True, 'import tjpcov.main as cv\n'), ((11853, 11887), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['input_yml'], {}), '(input_yml)\n', (11876, 11887), True, 'import tjpcov.main as cv\n'), ((12683, 12695), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12691, 12695), True, 'import numpy as np\n'), ((14130, 14149), 'numpy.all', 'np.all', (['(cov == cov2)'], {}), '(cov == cov2)\n', (14136, 14149), True, 'import numpy as np\n'), ((475, 492), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (489, 492), False, 'import yaml\n'), ((976, 990), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (983, 990), True, 'import numpy as np\n'), ((1098, 1117), 'numpy.delete', 'np.delete', (['cl', '(2)', '(0)'], {}), '(cl, 2, 0)\n', (1107, 1117), True, 'import numpy as np\n'), ((1319, 1333), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (1326, 1333), True, 'import numpy as np\n'), ((1706, 1743), 'numpy.zeros', 'np.zeros', (['(cl.shape[0], cl0_bin.size)'], {}), '((cl.shape[0], cl0_bin.size))\n', (1714, 1743), True, 'import numpy as np\n'), ((1914, 1933), 'numpy.delete', 'np.delete', (['cl', '(2)', '(0)'], {}), '(cl, 2, 0)\n', (1923, 1933), True, 'import numpy as np\n'), ((2396, 2410), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2403, 2410), True, 'import numpy as np\n'), ((4086, 4112), 'numpy.abs', 'np.abs', (['(chi2 / chi2_bm - 1)'], {}), '(chi2 / chi2_bm - 1)\n', (4092, 4112), True, 'import numpy as np\n'), ((5741, 5766), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5754, 5766), False, 'import pytest\n'), ((6702, 6727), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6715, 6727), False, 'import pytest\n'), ((7995, 8062), 'os.system', 'os.system', (['"""rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*"""'], {}), "('rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*')\n", (8004, 8062), False, 'import os\n'), ((8469, 8536), 'os.system', 'os.system', (['"""rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*"""'], {}), "('rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*')\n", (8478, 8536), False, 'import os\n'), ((12107, 12132), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12120, 12132), False, 'import pytest\n'), ((13011, 13037), 'numpy.abs', 'np.abs', (['(chi2 / chi2_bm - 1)'], {}), '(chi2 / chi2_bm - 1)\n', (13017, 13037), True, 'import numpy as np\n'), ((6585, 6609), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (6591, 6609), True, 'import numpy as np\n'), ((10447, 10471), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (10453, 10471), True, 'import numpy as np\n'), ((11632, 11656), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (11638, 11656), True, 'import numpy as np\n'), ((12622, 12646), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (12628, 12646), True, 'import numpy as np\n'), ((3981, 3999), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (3994, 3999), True, 'import numpy as np\n'), ((4038, 4059), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_bm'], {}), '(cov_bm)\n', (4051, 4059), True, 'import numpy as np\n'), ((12911, 12929), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (12924, 12929), True, 'import numpy as np\n'), ((12966, 12987), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_bm'], {}), '(cov_bm)\n', (12979, 12987), True, 'import numpy as np\n'), ((3704, 3724), 'numpy.delete', 'np.delete', (['cov', '(2)', '(1)'], {}), '(cov, 2, 1)\n', (3713, 3724), True, 'import numpy as np\n'), ((3833, 3856), 'numpy.delete', 'np.delete', (['cov_bm', '(2)', '(1)'], {}), '(cov_bm, 2, 1)\n', (3842, 3856), True, 'import numpy as np\n'), ((6523, 6535), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (6530, 6535), True, 'import numpy as np\n'), ((6538, 6553), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (6545, 6553), True, 'import numpy as np\n'), ((10385, 10397), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (10392, 10397), True, 'import numpy as np\n'), ((10400, 10415), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (10407, 10415), True, 'import numpy as np\n'), ((11570, 11582), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (11577, 11582), True, 'import numpy as np\n'), ((11585, 11600), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (11592, 11600), True, 'import numpy as np\n'), ((12560, 12572), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (12567, 12572), True, 'import numpy as np\n'), ((12575, 12590), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (12582, 12590), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from os.path import join
import requests
from bs4 import BeautifulSoup
SCIKIT_LEARN_BASE_URL = 'http://scikit-learn.org/stable/auto_examples/'
SCIKIT_INDEX_URL = 'http://scikit-learn.org/stable/auto_examples/index.html'
def download_file(fetch_me):
"""
Fetches a file in given url into the 'download' directory
Args:
fetch_me: URL to file
Returns:
local_filename: Path to local version of the downloaded file.
"""
local_filename = join('download', fetch_me.split('/')[-1])
r = requests.get(fetch_me, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return local_filename
if __name__ == "__main__":
local_file = download_file(SCIKIT_INDEX_URL)
with open(local_file, 'r') as index:
soup = BeautifulSoup(index, 'html.parser')
# Download everything in found in "examples" list
for example_link in soup.select('#examples .caption-text a'):
href_path = join(SCIKIT_LEARN_BASE_URL, example_link.get('href'))
download_file(href_path)
| [
"bs4.BeautifulSoup",
"requests.get"
] | [((550, 585), 'requests.get', 'requests.get', (['fetch_me'], {'stream': '(True)'}), '(fetch_me, stream=True)\n', (562, 585), False, 'import requests\n'), ((895, 930), 'bs4.BeautifulSoup', 'BeautifulSoup', (['index', '"""html.parser"""'], {}), "(index, 'html.parser')\n", (908, 930), False, 'from bs4 import BeautifulSoup\n')] |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint, getdate
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
if not filters.get("date"):
msgprint(_("Please select date"), raise_exception=1)
columns = get_columns(filters)
active_student_group = get_active_student_group()
data = []
for student_group in active_student_group:
row = [student_group.name]
present_students = 0
absent_students = 0
student_group_strength = get_student_group_strength(student_group.name)
student_attendance = get_student_attendance(student_group.name, filters.get("date"))
if student_attendance:
for attendance in student_attendance:
if attendance.status== "Present":
present_students = attendance.count
elif attendance.status== "Absent":
absent_students = attendance.count
unmarked_students = student_group_strength - (present_students + absent_students)
row+= [student_group_strength, present_students, absent_students, unmarked_students]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
_("Student Group") + ":Link/Student Group:250",
_("Student Group Strength") + "::170",
_("Present") + "::90",
_("Absent") + "::90",
_("Not Marked") + "::90"
]
return columns
def get_active_student_group():
active_student_groups = frappe.db.sql("""select name from `tabStudent Group` where group_based_on = "Batch"
and academic_year=%s order by name""", (frappe.defaults.get_defaults().academic_year), as_dict=1)
return active_student_groups
def get_student_group_strength(student_group):
student_group_strength = frappe.db.sql("""select count(*) from `tabStudent Group Student`
where parent = %s and active=1""", student_group)[0][0]
return student_group_strength
def get_student_attendance(student_group, date):
student_attendance = frappe.db.sql("""select count(*) as count, status from `tabStudent Attendance` where \
student_group= %s and date= %s and\
(course_schedule is Null or course_schedule='') group by status""",
(student_group, date), as_dict=1)
return student_attendance | [
"frappe.defaults.get_defaults",
"frappe.db.sql",
"frappe._"
] | [((2020, 2262), 'frappe.db.sql', 'frappe.db.sql', (['"""select count(*) as count, status from `tabStudent Attendance` where \t\t\t\tstudent_group= %s and date= %s and\t\t\t\t(course_schedule is Null or course_schedule=\'\') group by status"""', '(student_group, date)'], {'as_dict': '(1)'}), '(\n "select count(*) as count, status from `tabStudent Attendance` where \\t\\t\\t\\tstudent_group= %s and date= %s and\\t\\t\\t\\t(course_schedule is Null or course_schedule=\'\') group by status"\n , (student_group, date), as_dict=1)\n', (2033, 2262), False, 'import frappe\n'), ((357, 380), 'frappe._', '_', (['"""Please select date"""'], {}), "('Please select date')\n", (358, 380), False, 'from frappe import msgprint, _\n'), ((1259, 1277), 'frappe._', '_', (['"""Student Group"""'], {}), "('Student Group')\n", (1260, 1277), False, 'from frappe import msgprint, _\n'), ((1310, 1337), 'frappe._', '_', (['"""Student Group Strength"""'], {}), "('Student Group Strength')\n", (1311, 1337), False, 'from frappe import msgprint, _\n'), ((1352, 1364), 'frappe._', '_', (['"""Present"""'], {}), "('Present')\n", (1353, 1364), False, 'from frappe import msgprint, _\n'), ((1378, 1389), 'frappe._', '_', (['"""Absent"""'], {}), "('Absent')\n", (1379, 1389), False, 'from frappe import msgprint, _\n'), ((1402, 1417), 'frappe._', '_', (['"""Not Marked"""'], {}), "('Not Marked')\n", (1403, 1417), False, 'from frappe import msgprint, _\n'), ((1631, 1661), 'frappe.defaults.get_defaults', 'frappe.defaults.get_defaults', ([], {}), '()\n', (1659, 1661), False, 'import frappe\n'), ((1793, 1920), 'frappe.db.sql', 'frappe.db.sql', (['"""select count(*) from `tabStudent Group Student` \n\t\twhere parent = %s and active=1"""', 'student_group'], {}), '(\n """select count(*) from `tabStudent Group Student` \n\t\twhere parent = %s and active=1"""\n , student_group)\n', (1806, 1920), False, 'import frappe\n')] |
import logging
from bottle import Bottle, request, response, abort, static_file
import os
import time
import threading
from threading import Thread
from pathlib import Path
import json
import subprocess
import io
import sys
import signal
from internal.notifier import getNotifier, NotificationLevel
from internal.interprocess_message import SubprocessToParentMessage
from machine_app import MachineAppEngine
import paho.mqtt.subscribe as MQTTsubscribe
import paho.mqtt.client as mqtt
import traceback
class RestServer(Bottle):
'''
RESTful server that handles control of the MachineApp and configuration IO
'''
def __init__(self):
super(RestServer, self).__init__()
self.__clientDirectory = os.path.join('..', 'client')
self.__serverDirectory = os.path.join('.')
self.__logger = logging.getLogger(__name__)
self.__notifier = getNotifier()
self.__subprocess = MachineAppSubprocessManager()
self.__estopManager = EstopManager(self.onEstopEntered)
self.isPaused = False # TODO: It would be better to no track isPaused here
# Set up callbacks
self.route('/', callback=self.index)
self.route('/ping', callback=self.ping)
self.route('/<filepath:path>', callback=self.serveStatic)
self.route('/run/start', method='POST', callback=self.start)
self.route('/run/stop', method='POST', callback=self.stop)
self.route('/run/pause', method='POST', callback=self.pause)
self.route('/run/resume', method='POST', callback=self.resume)
self.route('/run/estop', method='POST', callback=self.estop)
self.route('/run/estop', method='GET', callback=self.getEstop)
self.route('/run/releaseEstop', method='POST', callback=self.releaseEstop)
self.route('/run/resetSystem', method='POST', callback=self.resetSystem)
self.route('/run/state', method='GET', callback=self.getState)
self.route('/run/message', method='POST', callback=self.sendMessage)
self.route('/kill', method='GET', callback=self.kill)
self.route('/logs', method='GET', callback=self.getLog)
def ping(self):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
return 'pong'
def index(self):
self.__logger.info('Handling index file request')
return static_file('index.html', root=self.__clientDirectory)
def serveStatic(self, filepath):
self.__logger.info('Serving static file: {}'.format(filepath))
return static_file(filepath, root=self.__clientDirectory)
def getLog(self):
return static_file('machine_app.log', root=self.__serverDirectory)
def start(self):
inStateStepperMode = (request.params['stateStepperMode'] == 'true') if 'stateStepperMode' in request.params else False
configuration = request.json
if self.__subprocess.start(inStateStepperMode, configuration):
return 'OK'
else:
abort(400, 'Failed to start the MachineApp')
def stop(self):
if self.__subprocess.sendMsgToSubprocess({ 'request': 'stop' }):
self.isPaused = False
return 'OK'
else:
abort(400, 'Failed to stop the MachineApp')
def pause(self):
if self.__subprocess.sendMsgToSubprocess({ 'request': 'pause' }):
self.isPaused = True
return 'OK'
else:
abort(400, 'Failed to pause the MachineApp')
def resume(self):
if self.__subprocess.sendMsgToSubprocess({ 'request': 'resume' }):
self.isPaused = False
return 'OK'
else:
abort(400, 'Failed to resume the MachineApp')
# TODO: All E-Stop functionality should be handles on this process
def estop(self):
if self.__estopManager.estop():
self.onEstopEntered()
return 'OK'
else:
abort(400, 'Failed to estop the MachineApp')
def getEstop(self):
return self.__estopManager.getEstop()
def releaseEstop(self):
if self.__estopManager.release():
return 'OK'
else:
abort(400, 'Failed to release estop')
def resetSystem(self):
if self.__estopManager.reset():
return 'OK'
else:
abort(400, 'Failed to reset the system')
def getState(self):
return {
"isRunning": self.__subprocess.isRunning(),
"isPaused": self.isPaused
}
def sendMessage(self):
msg = request.json
if self.__subprocess.sendMsgToSubprocess({ 'topic': msg['topic'], 'message': msg['message'] }):
return 'OK'
else:
abort(400, 'Failed to send the message to the MachineApp')
def kill(self):
self.__subprocess.terminate()
os.kill(os.getpid(), signal.SIGTERM)
return 'OK'
def onEstopEntered(self):
try:
if self.__subprocess.isRunning():
self.__subprocess.terminate()
self.isPaused = False
# Create a temporary MachineAppEngine and call onEstop
temporaryApp = MachineAppEngine()
temporaryApp.initialize()
temporaryApp.onEstop()
except Exception as e:
self.__notifier.sendMessage(NotificationLevel.ERROR, "Failed to run onEstop behavior: %s (%s)" % (traceback.format_exc(), e))
class MQTTPATHS :
ESTOP = "estop"
ESTOP_STATUS = ESTOP + "/status"
ESTOP_TRIGGER_REQUEST = ESTOP + "/trigger/request"
ESTOP_TRIGGER_RESPONSE = ESTOP + "/trigger/response"
ESTOP_RELEASE_REQUEST = ESTOP + "/release/request"
ESTOP_RELEASE_RESPONSE = ESTOP + "/release/response"
ESTOP_SYSTEMRESET_REQUEST = ESTOP + "/systemreset/request"
ESTOP_SYSTEMRESET_RESPONSE = ESTOP + "/systemreset/response"
class EstopManager:
TIMEOUT = 10.0
'''
Small class that subscribes/publishes to MQTT eStop events
to control the current state of the estop.
'''
def __init__(self, onEstopEntered):
self.__onEstopEntered = onEstopEntered
self.__isEstopped = False
self.__notifier = getNotifier()
self.__mqttClient = mqtt.Client()
self.__logger = logging.getLogger(__name__)
self.__mqttClient.on_connect = self.__onConnect
self.__mqttClient.on_message = self.__onMessage
self.__mqttClient.on_disconnect = self.__onDisconnect
self.IP = '127.0.0.1'
self.__mqttClient.connect(self.IP)
self.__mqttClient.loop_start()
def __onConnect(self, client, userData, flags, rc):
if rc == 0:
self.__mqttClient.subscribe(MQTTPATHS.ESTOP_STATUS)
def __onMessage(self, client, userData, msg):
topicParts = msg.topic.split('/')
deviceType = topicParts[1]
if (topicParts[0] == MQTTPATHS.ESTOP) :
if (topicParts[1] == "status") :
self.__isEstopped = json.loads(msg.payload.decode('utf-8'))
if self.__isEstopped:
self.__notifier.sendMessage(NotificationLevel.APP_ESTOP, 'Machine is in estop')
self.__onEstopEntered()
else:
self.__notifier.sendMessage(NotificationLevel.APP_ESTOP_RELEASE, 'Estop Released')
def __onDisconnect(self, client, userData, rc):
logging.info("Disconnected with rtn code [%d]"% (rc))
return
def estop(self):
return_value = { 'value': False }
def mqttResponse() :
# Wait for response
return_value['value'] = json.loads(MQTTsubscribe.simple(MQTTPATHS.ESTOP_TRIGGER_RESPONSE, retained = False, hostname = self.IP).payload.decode('utf-8'))
return
mqttResponseThread = threading.Thread(target = mqttResponse)
mqttResponseThread.daemon = True
mqttResponseThread.start()
# Adding a delay to make sure MQTT simple function is launched before publish is made. Quick fix from bug on App. Launcher.
time.sleep(0.2)
# Publish trigger request on MQTT
self.__mqttClient.publish(MQTTPATHS.ESTOP_TRIGGER_REQUEST, "message is not important")
mqttResponseThread.join(EstopManager.TIMEOUT)
if mqttResponseThread.isAlive() :
self.__logger.error('MQTT response timeout.')
return False
else :
return return_value['value']
return return_value['value']
def release(self):
return_value = { 'value': False }
def mqttResponse() :
# Wait for response
return_value['value'] = json.loads(MQTTsubscribe.simple(MQTTPATHS.ESTOP_RELEASE_RESPONSE, retained = False, hostname = self.IP).payload.decode('utf-8'))
return
mqttResponseThread = threading.Thread(target = mqttResponse)
mqttResponseThread.daemon = True
mqttResponseThread.start()
# Adding a delay to make sure MQTT simple function is launched before publish is made. Quick fix from bug on App. Launcher.
time.sleep(0.2)
# Publish release request on MQTT
self.__mqttClient.publish(MQTTPATHS.ESTOP_RELEASE_REQUEST, "message is not important")
mqttResponseThread.join(EstopManager.TIMEOUT)
if mqttResponseThread.isAlive() :
self.__logger.error('MQTT response timeout.')
return False
else :
return return_value['value']
return return_value['value']
def reset(self):
return_value = { 'value': False }
def mqttResponse() :
# Wait for response
return_value['value'] = json.loads(MQTTsubscribe.simple(MQTTPATHS.ESTOP_SYSTEMRESET_RESPONSE, retained = False, hostname = self.IP).payload.decode('utf-8'))
return
mqttResponseThread = threading.Thread(target = mqttResponse)
mqttResponseThread.daemon = True
mqttResponseThread.start()
# Adding a delay to make sure MQTT simple function is launched before publish is made. Quick fix from bug on App. Launcher.
time.sleep(0.2)
# Publish reset system request on MQTT
self.__mqttClient.publish(MQTTPATHS.ESTOP_SYSTEMRESET_REQUEST, "message is not important")
mqttResponseThread.join(EstopManager.TIMEOUT)
if mqttResponseThread.isAlive() :
self.__logger.error('MQTT response timeout.')
return False
else :
return return_value['value']
return return_value['value']
def getEstop(self):
return 'true' if self.__isEstopped else 'false'
class MachineAppSubprocessManager:
'''
Manages the lifetime of the MachineApp subprocess, forwards stdin commands and stdout information.
'''
def __init__(self):
self.__isRunning = False
self.__subprocess = None
self.__stdout = None
self.__stderr = None
self.__logger = logging.getLogger(__name__)
self.__notifier = getNotifier()
self.__stdthread = Thread(name='subprocess_stdout', target=self.__update)
self.__stdthread.daemon = True
self.__stdthread.start()
def start(self, inStateStepperMode, configuration):
'''
Starts running the MachineApp in a new process
'''
if self.__isRunning == True:
return False
with open('./internal/configuration.json', 'w') as f:
f.write(json.dumps(configuration, indent=4))
command = [ sys.executable, 'subapp.py' ]
if inStateStepperMode:
command.append('--inStateStepperMode')
self.__logger.info('Attempting to run subprocess: {}'.format(' '.join(command)))
self.__subprocess = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
self.__isRunning = True
return True
def sendMsgToSubprocess(self, data):
'''
Write a JSON payload to stdin of the child process
'''
if self.__subprocess == None:
return False
self.__subprocess.stdin.write(str(json.dumps(data) + '\r\n').encode('utf-8'))
self.__subprocess.stdin.flush()
return True
def __update(self):
'''
Used to forward notifier messages from the child process to the client. This enables us to not
have to constantly disconnect/reconnect to the child process' websocket whenever the user pressed
start or stop.
We also catch standard 'print' outputs here too, and print them out to the parent process' console.
'''
while True: # Waiting to receive the start command, will sleep in the meantime
while self.__isRunning: # Received start and waiting on the subprocess stdout
if self.__subprocess == None or self.__subprocess.poll() != None or self.__subprocess.stdout.closed:
self.__isRunning = False
self.__logger.info('Subprocess is no longer active')
continue
while self.__subprocess != None and self.__subprocess.stdout != None and self.__subprocess.stdout.readable():
line = self.__subprocess.stdout.readline().decode('utf-8').strip()
if len(line) == 0:
break
line = line.strip()
try:
content = json.loads(line)
if not "type" in content:
continue
msgType = content["type"]
if msgType == SubprocessToParentMessage.NOTIFICATION:
notification = content["data"]
self.__notifier.sendMessage(notification['level'], notification['message'], notification['customPayload'])
except:
print(line)
time.sleep(1.0)
def terminate(self):
'''
Terminates the subprocess immediately
returns:
bool
Successfully terminated a running application or not
'''
if self.__subprocess == None:
return False
self.__subprocess.kill()
self.__subprocess = None
return True
def isRunning(self):
return self.__isRunning
def runServer():
restServer = RestServer()
restServer.run(host='0.0.0.0', port=3011, server='paste') | [
"logging.getLogger",
"bottle.static_file",
"json.loads",
"internal.notifier.getNotifier",
"traceback.format_exc",
"paho.mqtt.client.Client",
"subprocess.Popen",
"json.dumps",
"os.path.join",
"time.sleep",
"paho.mqtt.subscribe.simple",
"os.getpid",
"machine_app.MachineAppEngine",
"threading... | [((728, 756), 'os.path.join', 'os.path.join', (['""".."""', '"""client"""'], {}), "('..', 'client')\n", (740, 756), False, 'import os\n'), ((790, 807), 'os.path.join', 'os.path.join', (['"""."""'], {}), "('.')\n", (802, 807), False, 'import os\n'), ((832, 859), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (849, 859), False, 'import logging\n'), ((886, 899), 'internal.notifier.getNotifier', 'getNotifier', ([], {}), '()\n', (897, 899), False, 'from internal.notifier import getNotifier, NotificationLevel\n'), ((2580, 2634), 'bottle.static_file', 'static_file', (['"""index.html"""'], {'root': 'self.__clientDirectory'}), "('index.html', root=self.__clientDirectory)\n", (2591, 2634), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((2767, 2817), 'bottle.static_file', 'static_file', (['filepath'], {'root': 'self.__clientDirectory'}), '(filepath, root=self.__clientDirectory)\n', (2778, 2817), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((2856, 2915), 'bottle.static_file', 'static_file', (['"""machine_app.log"""'], {'root': 'self.__serverDirectory'}), "('machine_app.log', root=self.__serverDirectory)\n", (2867, 2915), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((6445, 6458), 'internal.notifier.getNotifier', 'getNotifier', ([], {}), '()\n', (6456, 6458), False, 'from internal.notifier import getNotifier, NotificationLevel\n'), ((6487, 6500), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (6498, 6500), True, 'import paho.mqtt.client as mqtt\n'), ((6525, 6552), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6542, 6552), False, 'import logging\n'), ((7647, 7699), 'logging.info', 'logging.info', (["('Disconnected with rtn code [%d]' % rc)"], {}), "('Disconnected with rtn code [%d]' % rc)\n", (7659, 7699), False, 'import logging\n'), ((8065, 8102), 'threading.Thread', 'threading.Thread', ([], {'target': 'mqttResponse'}), '(target=mqttResponse)\n', (8081, 8102), False, 'import threading\n'), ((8322, 8337), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (8332, 8337), False, 'import time\n'), ((9094, 9131), 'threading.Thread', 'threading.Thread', ([], {'target': 'mqttResponse'}), '(target=mqttResponse)\n', (9110, 9131), False, 'import threading\n'), ((9351, 9366), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9361, 9366), False, 'import time\n'), ((10125, 10162), 'threading.Thread', 'threading.Thread', ([], {'target': 'mqttResponse'}), '(target=mqttResponse)\n', (10141, 10162), False, 'import threading\n'), ((10382, 10397), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (10392, 10397), False, 'import time\n'), ((11229, 11256), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (11246, 11256), False, 'import logging\n'), ((11283, 11296), 'internal.notifier.getNotifier', 'getNotifier', ([], {}), '()\n', (11294, 11296), False, 'from internal.notifier import getNotifier, NotificationLevel\n'), ((11325, 11379), 'threading.Thread', 'Thread', ([], {'name': '"""subprocess_stdout"""', 'target': 'self.__update'}), "(name='subprocess_stdout', target=self.__update)\n", (11331, 11379), False, 'from threading import Thread\n'), ((12022, 12094), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n', (12038, 12094), False, 'import subprocess\n'), ((3232, 3276), 'bottle.abort', 'abort', (['(400)', '"""Failed to start the MachineApp"""'], {}), "(400, 'Failed to start the MachineApp')\n", (3237, 3276), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((3455, 3498), 'bottle.abort', 'abort', (['(400)', '"""Failed to stop the MachineApp"""'], {}), "(400, 'Failed to stop the MachineApp')\n", (3460, 3498), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((3678, 3722), 'bottle.abort', 'abort', (['(400)', '"""Failed to pause the MachineApp"""'], {}), "(400, 'Failed to pause the MachineApp')\n", (3683, 3722), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((3905, 3950), 'bottle.abort', 'abort', (['(400)', '"""Failed to resume the MachineApp"""'], {}), "(400, 'Failed to resume the MachineApp')\n", (3910, 3950), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((4168, 4212), 'bottle.abort', 'abort', (['(400)', '"""Failed to estop the MachineApp"""'], {}), "(400, 'Failed to estop the MachineApp')\n", (4173, 4212), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((4405, 4442), 'bottle.abort', 'abort', (['(400)', '"""Failed to release estop"""'], {}), "(400, 'Failed to release estop')\n", (4410, 4442), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((4560, 4600), 'bottle.abort', 'abort', (['(400)', '"""Failed to reset the system"""'], {}), "(400, 'Failed to reset the system')\n", (4565, 4600), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((4957, 5015), 'bottle.abort', 'abort', (['(400)', '"""Failed to send the message to the MachineApp"""'], {}), "(400, 'Failed to send the message to the MachineApp')\n", (4962, 5015), False, 'from bottle import Bottle, request, response, abort, static_file\n'), ((5091, 5102), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5100, 5102), False, 'import os\n'), ((14214, 14229), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (14224, 14229), False, 'import time\n'), ((5433, 5451), 'machine_app.MachineAppEngine', 'MachineAppEngine', ([], {}), '()\n', (5449, 5451), False, 'from machine_app import MachineAppEngine\n'), ((11734, 11769), 'json.dumps', 'json.dumps', (['configuration'], {'indent': '(4)'}), '(configuration, indent=4)\n', (11744, 11769), False, 'import json\n'), ((13709, 13725), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (13719, 13725), False, 'import json\n'), ((5674, 5696), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5694, 5696), False, 'import traceback\n'), ((7889, 7981), 'paho.mqtt.subscribe.simple', 'MQTTsubscribe.simple', (['MQTTPATHS.ESTOP_TRIGGER_RESPONSE'], {'retained': '(False)', 'hostname': 'self.IP'}), '(MQTTPATHS.ESTOP_TRIGGER_RESPONSE, retained=False,\n hostname=self.IP)\n', (7909, 7981), True, 'import paho.mqtt.subscribe as MQTTsubscribe\n'), ((8926, 9018), 'paho.mqtt.subscribe.simple', 'MQTTsubscribe.simple', (['MQTTPATHS.ESTOP_RELEASE_RESPONSE'], {'retained': '(False)', 'hostname': 'self.IP'}), '(MQTTPATHS.ESTOP_RELEASE_RESPONSE, retained=False,\n hostname=self.IP)\n', (8946, 9018), True, 'import paho.mqtt.subscribe as MQTTsubscribe\n'), ((9953, 10049), 'paho.mqtt.subscribe.simple', 'MQTTsubscribe.simple', (['MQTTPATHS.ESTOP_SYSTEMRESET_RESPONSE'], {'retained': '(False)', 'hostname': 'self.IP'}), '(MQTTPATHS.ESTOP_SYSTEMRESET_RESPONSE, retained=False,\n hostname=self.IP)\n', (9973, 10049), True, 'import paho.mqtt.subscribe as MQTTsubscribe\n'), ((12380, 12396), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (12390, 12396), False, 'import json\n')] |
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.locations.models import SQLLocation
from corehq.form_processor.models import CommCareCaseIndexSQL
from custom.icds.case_relationships import (
mother_person_case_from_ccs_record_case,
mother_person_case_from_child_health_case,
mother_person_case_from_child_person_case,
)
from custom.icds.const import SUPERVISOR_LOCATION_TYPE_CODE
from custom.icds.exceptions import CaseRelationshipError
from datetime import datetime
from dimagi.utils.logging import notify_exception
def skip_notifying_missing_mother_person_case(e):
# https://manage.dimagi.com/default.asp?271995
# It's expected that some child person cases will not have a mother person case,
# so we don't notify when that's the lookup that fails.
return (
e.child_case_type == 'person' and
e.identifier == 'mother' and
e.relationship == CommCareCaseIndexSQL.CHILD and
e.num_related_found == 0
)
def skip_notifying_missing_ccs_record_parent(e):
# https://manage.dimagi.com/default.asp?277600
# This is an open issue so it probably doesn't make sense to keep notifying
# these unless it gets resolved. Going to make these start notifying at a
# later date so this can be revisited.
return (
datetime.utcnow() < datetime(2018, 8, 1) and
e.child_case_type == 'ccs_record' and
e.identifier == 'parent' and
e.relationship == CommCareCaseIndexSQL.CHILD and
e.num_related_found == 0
)
def recipient_mother_person_case_from_ccs_record_case(case_schedule_instance):
try:
return mother_person_case_from_ccs_record_case(case_schedule_instance.case)
except CaseRelationshipError as e:
if not skip_notifying_missing_ccs_record_parent(e):
notify_exception(None, message="ICDS ccs_record relationship error")
return None
def recipient_mother_person_case_from_ccs_record_case_excl_migrated_or_opted_out(case_schedule_instance):
from custom.icds.messaging.custom_content import person_case_is_migrated_or_opted_out
mother = recipient_mother_person_case_from_ccs_record_case(case_schedule_instance)
if mother is None or person_case_is_migrated_or_opted_out(mother):
return None
return mother
def recipient_mother_person_case_from_child_health_case(case_schedule_instance):
try:
return mother_person_case_from_child_health_case(case_schedule_instance.case)
except CaseRelationshipError as e:
if not skip_notifying_missing_mother_person_case(e):
notify_exception(None, message="ICDS child health case relationship error")
return None
def recipient_mother_person_case_from_child_person_case(case_schedule_instance):
try:
return mother_person_case_from_child_person_case(case_schedule_instance.case)
except CaseRelationshipError as e:
if not skip_notifying_missing_mother_person_case(e):
notify_exception(None, message="ICDS child person case relationship error")
return None
def supervisor_from_awc_owner(case_schedule_instance):
if not case_schedule_instance.case:
return None
# Use one query to lookup the AWC, ensure there is a parent location,
# and ensure the parent location is a supervisor
awc = SQLLocation.objects.filter(
location_id=case_schedule_instance.case.owner_id,
parent__location_type__code=SUPERVISOR_LOCATION_TYPE_CODE
).select_related('parent').first()
if not awc:
return None
return awc.parent
| [
"datetime.datetime",
"datetime.datetime.utcnow",
"custom.icds.case_relationships.mother_person_case_from_child_health_case",
"dimagi.utils.logging.notify_exception",
"custom.icds.case_relationships.mother_person_case_from_child_person_case",
"corehq.apps.locations.models.SQLLocation.objects.filter",
"cu... | [((1668, 1736), 'custom.icds.case_relationships.mother_person_case_from_ccs_record_case', 'mother_person_case_from_ccs_record_case', (['case_schedule_instance.case'], {}), '(case_schedule_instance.case)\n', (1707, 1736), False, 'from custom.icds.case_relationships import mother_person_case_from_ccs_record_case, mother_person_case_from_child_health_case, mother_person_case_from_child_person_case\n'), ((2250, 2294), 'custom.icds.messaging.custom_content.person_case_is_migrated_or_opted_out', 'person_case_is_migrated_or_opted_out', (['mother'], {}), '(mother)\n', (2286, 2294), False, 'from custom.icds.messaging.custom_content import person_case_is_migrated_or_opted_out\n'), ((2442, 2512), 'custom.icds.case_relationships.mother_person_case_from_child_health_case', 'mother_person_case_from_child_health_case', (['case_schedule_instance.case'], {}), '(case_schedule_instance.case)\n', (2483, 2512), False, 'from custom.icds.case_relationships import mother_person_case_from_ccs_record_case, mother_person_case_from_child_health_case, mother_person_case_from_child_person_case\n'), ((2829, 2899), 'custom.icds.case_relationships.mother_person_case_from_child_person_case', 'mother_person_case_from_child_person_case', (['case_schedule_instance.case'], {}), '(case_schedule_instance.case)\n', (2870, 2899), False, 'from custom.icds.case_relationships import mother_person_case_from_ccs_record_case, mother_person_case_from_child_health_case, mother_person_case_from_child_person_case\n'), ((1339, 1356), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1354, 1356), False, 'from datetime import datetime\n'), ((1359, 1379), 'datetime.datetime', 'datetime', (['(2018)', '(8)', '(1)'], {}), '(2018, 8, 1)\n', (1367, 1379), False, 'from datetime import datetime\n'), ((1848, 1916), 'dimagi.utils.logging.notify_exception', 'notify_exception', (['None'], {'message': '"""ICDS ccs_record relationship error"""'}), "(None, message='ICDS ccs_record relationship error')\n", (1864, 1916), False, 'from dimagi.utils.logging import notify_exception\n'), ((2625, 2700), 'dimagi.utils.logging.notify_exception', 'notify_exception', (['None'], {'message': '"""ICDS child health case relationship error"""'}), "(None, message='ICDS child health case relationship error')\n", (2641, 2700), False, 'from dimagi.utils.logging import notify_exception\n'), ((3012, 3087), 'dimagi.utils.logging.notify_exception', 'notify_exception', (['None'], {'message': '"""ICDS child person case relationship error"""'}), "(None, message='ICDS child person case relationship error')\n", (3028, 3087), False, 'from dimagi.utils.logging import notify_exception\n'), ((3364, 3503), 'corehq.apps.locations.models.SQLLocation.objects.filter', 'SQLLocation.objects.filter', ([], {'location_id': 'case_schedule_instance.case.owner_id', 'parent__location_type__code': 'SUPERVISOR_LOCATION_TYPE_CODE'}), '(location_id=case_schedule_instance.case.owner_id,\n parent__location_type__code=SUPERVISOR_LOCATION_TYPE_CODE)\n', (3390, 3503), False, 'from corehq.apps.locations.models import SQLLocation\n')] |
"""
Driver program for training and evaluation.
"""
import argparse
import logging
import numpy as np
import random
import torch
import torch.optim as O
from datasets import get_dataset, get_dataset_configurations
from models import get_model
from runners import Runner
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sentence similarity models')
parser.add_argument('--model', default='sif', choices=['sif', 'mpcnn', 'mpcnn-lite', 'bimpm'], help='Model to use')
parser.add_argument('--dataset', default='sick', choices=['sick', 'wikiqa'], help='Dataset to use')
parser.add_argument('--batch-size', type=int, default=64, help='Batch size')
parser.add_argument('--epochs', type=int, default=15, help='Number of epochs')
parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
parser.add_argument('--regularization', type=float, default=3e-4, help='Regularization')
parser.add_argument('--seed', type=int, default=1234, help='Seed for reproducibility')
parser.add_argument('--device', type=int, default=0, help='Device, -1 for CPU')
parser.add_argument('--log-interval', type=int, default=50, help='Device, -1 for CPU')
# Special options for SIF model
parser.add_argument('--unsupervised', action='store_true', default=False, help='Set this flag to use unsupervised mode.')
parser.add_argument('--alpha', type=float, default=1e-3, help='Smoothing term for smooth inverse frequency baseline model')
parser.add_argument('--no-remove-special-direction', action='store_true', default=False, help='Set to not remove projection onto first principal component')
parser.add_argument('--frequency-dataset', default='enwiki', choices=['train', 'enwiki'])
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)
model = get_model(args, dataset_cls, embedding)
if args.model == 'sif':
model.populate_word_frequency_estimation(train_loader)
total_params = 0
for param in model.parameters():
size = [s for s in param.size()]
total_params += np.prod(size)
logger.info('Total number of parameters: %s', total_params)
loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(args)
optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.regularization)
runner = Runner(model, loss_fn, metrics, optimizer, y_to_score, resolved_pred_to_score, args.device, None)
runner.run(args.epochs, train_loader, dev_loader, test_loader, args.log_interval)
| [
"logging.getLogger",
"torch.manual_seed",
"numpy.prod",
"logging.StreamHandler",
"argparse.ArgumentParser",
"runners.Runner",
"logging.Formatter",
"models.get_model",
"random.seed",
"torch.cuda.manual_seed",
"numpy.random.seed",
"datasets.get_dataset_configurations",
"datasets.get_dataset"
] | [((314, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sentence similarity models"""'}), "(description='Sentence similarity models')\n", (337, 379), False, 'import argparse\n'), ((1789, 1811), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1800, 1811), False, 'import random\n'), ((1816, 1841), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1830, 1841), True, 'import numpy as np\n'), ((1846, 1874), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1863, 1874), False, 'import torch\n'), ((1957, 1984), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1974, 1984), False, 'import logging\n'), ((2028, 2051), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2049, 2051), False, 'import logging\n'), ((2099, 2147), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (2116, 2147), False, 'import logging\n'), ((2274, 2291), 'datasets.get_dataset', 'get_dataset', (['args'], {}), '(args)\n', (2285, 2291), False, 'from datasets import get_dataset, get_dataset_configurations\n'), ((2304, 2343), 'models.get_model', 'get_model', (['args', 'dataset_cls', 'embedding'], {}), '(args, dataset_cls, embedding)\n', (2313, 2343), False, 'from models import get_model\n'), ((2698, 2730), 'datasets.get_dataset_configurations', 'get_dataset_configurations', (['args'], {}), '(args)\n', (2724, 2730), False, 'from datasets import get_dataset, get_dataset_configurations\n'), ((2869, 2970), 'runners.Runner', 'Runner', (['model', 'loss_fn', 'metrics', 'optimizer', 'y_to_score', 'resolved_pred_to_score', 'args.device', 'None'], {}), '(model, loss_fn, metrics, optimizer, y_to_score,\n resolved_pred_to_score, args.device, None)\n', (2875, 2970), False, 'from runners import Runner\n'), ((1909, 1942), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1931, 1942), False, 'import torch\n'), ((2560, 2573), 'numpy.prod', 'np.prod', (['size'], {}), '(size)\n', (2567, 2573), True, 'import numpy as np\n')] |
"""
USFM References Tools
"""
import re
__version__ = '1.1.0'
ANY_REF = re.compile(r'^[1-9A-Z]{3}\.([0-9]{1,3}(_[0-9]+)?(\.[0-9]{1,3})?|INTRO\d+)$')
CHAPTER = re.compile(r'^[1-6A-Z]{3}\.[0-9]{1,3}(_[0-9]+)?$')
CHAPTER_OR_INTRO = re.compile(r'^[1-9A-Z]{3}\.([0-9]{1,3}(_[0-9]+)?|INTRO\d+)$')
SINGLE_CHAPTER_OR_VERSE = re.compile(r'^([A-Za-z]{3})\.([1-9]+\.{0,1}[1-9]*)$')
VERSE = re.compile(r'^[1-6A-Z]{3}\.[0-9]{1,3}(_[0-9]+)?\.[0-9]{1,3}$')
BOOKS = [
'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT', '1SA', '2SA', '1KI', '2KI', '1CH',
'2CH', 'EZR', 'NEH', 'EST', 'JOB', 'PSA', 'PRO', 'ECC', 'SNG', 'ISA', 'JER', 'LAM', 'EZK',
'DAN', 'HOS', 'JOL', 'AMO', 'OBA', 'JON', 'MIC', 'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL',
'MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO', 'GAL', 'EPH', 'PHP', 'COL', '1TH',
'2TH', '1TI', '2TI', 'TIT', 'PHM', 'HEB', 'JAS', '1PE', '2PE', '1JN', '2JN', '3JN', 'JUD',
'REV', 'TOB', 'JDT', 'ESG', 'WIS', 'SIR', 'BAR', 'LJE', 'S3Y', 'SUS', 'BEL', '1MA', '2MA',
'3MA', '4MA', '1ES', '2ES', 'MAN', 'PS2', 'ODA', 'PSS', 'EZA', '5EZ', '6EZ', 'DAG', 'PS3',
'2BA', 'LBA', '2MQ', '3MQ', 'REP', '4BA', 'LAO', 'LKA'
]
def valid_chapter(ref):
"""
Succeeds if the given string is a validly structured USFM Bible chapter reference.
A valid, capitalized (English) book abbreviation,
followed by a period (.) and a (chapter) number of any length,
optionally followed by an underscore (_) and a (sub-chapter?) number of any length.
"""
return bool(re.match(CHAPTER, ref) and ref.split('.')[0] in BOOKS)
def valid_chapter_or_intro(ref):
"""
Succeeds if the given string is a validly structured USFM Bible chapter reference or and INTRO.
A valid, capitalized (English) book abbreviation,
followed by a period (.) and a (chapter) number of any length,
optionally followed by an underscore (_) and a (sub-chapter?) number of any length.
OR
followed by a period (.) and INTRO, followed by a number
"""
return bool(CHAPTER_OR_INTRO.match(ref)) and ref.split('.')[0] in BOOKS
def valid_usfm(ref):
"""
Succeeds if the given string is a validly structured USFM Bible reference.
A valid, capitalized (English) book abbreviation,
followed by a period (.) and a (chapter) number of any length,
optionally followed by an underscore (_) and a (sub-chapter?) number of any length,
optionally followed by a period (.) and a (verse) number of any length.
"""
return bool(ANY_REF.match(ref)) and ref.split('.')[0] in BOOKS
def valid_verse(ref):
"""
Succeeds if the given string is a validly structured USFM Bible verse reference.
A valid, capitalized (English) book abbreviation,
followed by a period (.) and a (chapter) number of any length,
optionally followed by an underscore (_) and a (sub-chapter?) number of any length,
optionally followed by a period (.) and a (verse) number of any length.
"""
return bool(re.match(VERSE, ref) and ref.split('.')[0] in BOOKS)
def valid_multi_usfm(ref, delimiter='+'):
"""
Succeeds if the given string is a validly structured set of UFM Bible references.
A valid, capitalized (English) book abbreviation,
followed by a period (.) and a (chapter) number of any length,
optionally followed by an underscore (_) and a (sub-chapter?) number of any length,
optionally followed by a period (.) and a (verse) number of any length.
Multiple verses are seperated by a plus (+)
Example Multi USFM ref (James1:1-5): JAS.1.1+JAS.1.2+JAS.1.3+JAS.1.4+JAS.1.5
Another Example with COMMA delimiter: JAS.1.1,JAS.1.2,JAS.1.3,JAS.1.4,JAS.1.5
"""
if any([not valid_usfm(usfm) for usfm in ref.split(delimiter)]):
return False
return True
| [
"re.match",
"re.compile"
] | [((74, 152), 're.compile', 're.compile', (['"""^[1-9A-Z]{3}\\\\.([0-9]{1,3}(_[0-9]+)?(\\\\.[0-9]{1,3})?|INTRO\\\\d+)$"""'], {}), "('^[1-9A-Z]{3}\\\\.([0-9]{1,3}(_[0-9]+)?(\\\\.[0-9]{1,3})?|INTRO\\\\d+)$')\n", (84, 152), False, 'import re\n'), ((161, 211), 're.compile', 're.compile', (['"""^[1-6A-Z]{3}\\\\.[0-9]{1,3}(_[0-9]+)?$"""'], {}), "('^[1-6A-Z]{3}\\\\.[0-9]{1,3}(_[0-9]+)?$')\n", (171, 211), False, 'import re\n'), ((231, 293), 're.compile', 're.compile', (['"""^[1-9A-Z]{3}\\\\.([0-9]{1,3}(_[0-9]+)?|INTRO\\\\d+)$"""'], {}), "('^[1-9A-Z]{3}\\\\.([0-9]{1,3}(_[0-9]+)?|INTRO\\\\d+)$')\n", (241, 293), False, 'import re\n'), ((319, 373), 're.compile', 're.compile', (['"""^([A-Za-z]{3})\\\\.([1-9]+\\\\.{0,1}[1-9]*)$"""'], {}), "('^([A-Za-z]{3})\\\\.([1-9]+\\\\.{0,1}[1-9]*)$')\n", (329, 373), False, 'import re\n'), ((381, 444), 're.compile', 're.compile', (['"""^[1-6A-Z]{3}\\\\.[0-9]{1,3}(_[0-9]+)?\\\\.[0-9]{1,3}$"""'], {}), "('^[1-6A-Z]{3}\\\\.[0-9]{1,3}(_[0-9]+)?\\\\.[0-9]{1,3}$')\n", (391, 444), False, 'import re\n'), ((1542, 1564), 're.match', 're.match', (['CHAPTER', 'ref'], {}), '(CHAPTER, ref)\n', (1550, 1564), False, 'import re\n'), ((3033, 3053), 're.match', 're.match', (['VERSE', 'ref'], {}), '(VERSE, ref)\n', (3041, 3053), False, 'import re\n')] |
from wallarm_api.core.api.base_api import BaseApi
from wallarm_api.core.models.trigger import Triggers
class TriggersApi(BaseApi):
def get_triggers(self, clientid):
url = f'/v2/clients/{clientid}/triggers?denormalize=true'
response = self.client.get(url)
triggers = Triggers(triggers=response['triggers'])
return triggers
| [
"wallarm_api.core.models.trigger.Triggers"
] | [((307, 346), 'wallarm_api.core.models.trigger.Triggers', 'Triggers', ([], {'triggers': "response['triggers']"}), "(triggers=response['triggers'])\n", (315, 346), False, 'from wallarm_api.core.models.trigger import Triggers\n')] |
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
names = locals()
img0 = Image.open("./assets/pyCharm.png")
# print image info:
print(img0.size, img0.format, img0.mode, np.array(img0))
# save other format
# img0.save('./assets/pyCharm.tiff')
# img0.convert('RGB').save('./assets/pyCharm.jpeg')
# img0.convert('L').save('./assets/pyCharm.bmp') # 灰度图
img1 = Image.open('./assets/pyCharm.tiff') # 3通道图
img2 = Image.open('./assets/pyCharm.jpeg')
img3 = Image.open('./assets/pyCharm.bmp')
# 3通道图可以拆分
img4, img5, img6 = img2.split()
img7 = Image.merge('RGB', [img5, img6, img4])
plt.figure(figsize=(15, 15))
for i in range(8):
plt.subplot(4, 3, i + 1)
plt.axis('off') # hide axis
plt.imshow(names.get('img' + str(i)))
plt.title(names.get('img' + str(i)).format)
# 去除 png 的白边
img_dir = '/Users/carl/Pictures/logos/'
logo = Image.open(img_dir + 'google.png')
# 将压缩的8位图像转成rgba
logo = logo.convert('RGBA')
# 分离通道
(logo_r, logo_g, logo_b, logo_a) = logo.split()
# 转换成numpy数组
arr_r = np.array(logo_r)
arr_g = np.array(logo_g)
arr_b = np.array(logo_b)
arr_a = np.array(logo_a)
# 筛选像素坐标
idx = (arr_r == 245) & (arr_g == 247) & (arr_b == 247)
# 修改为透明像素点
arr_r[idx] = 0
arr_g[idx] = 0
arr_b[idx] = 0
arr_a[idx] = 0
# 将numpy数组转回图片对象
shard_r = Image.fromarray(arr_r)
shard_g = Image.fromarray(arr_g)
shard_b = Image.fromarray(arr_b)
shard_a = Image.fromarray(arr_a)
rgb_dict = 'rgba'
for i in range(4):
plt.subplot(4, 3, i+9)
plt.axis('off') # hide axis
plt.imshow(names.get('shard_' + rgb_dict[i]))
plt.title(names.get('shard_' + rgb_dict[i]).format)
# 合并通道,保存
Image.merge('RGBA', [shard_r, shard_g, shard_b, shard_a]).save(img_dir + 'logo-1.png', overWrite=True)
plt.tight_layout()
plt.show() | [
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"PIL.Image.merge",
"matplotlib.pyplot.show"
] | [((104, 138), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.png"""'], {}), "('./assets/pyCharm.png')\n", (114, 138), False, 'from PIL import Image\n'), ((389, 424), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.tiff"""'], {}), "('./assets/pyCharm.tiff')\n", (399, 424), False, 'from PIL import Image\n'), ((440, 475), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.jpeg"""'], {}), "('./assets/pyCharm.jpeg')\n", (450, 475), False, 'from PIL import Image\n'), ((483, 517), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.bmp"""'], {}), "('./assets/pyCharm.bmp')\n", (493, 517), False, 'from PIL import Image\n'), ((568, 606), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '[img5, img6, img4]'], {}), "('RGB', [img5, img6, img4])\n", (579, 606), False, 'from PIL import Image\n'), ((608, 636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (618, 636), True, 'from matplotlib import pyplot as plt\n'), ((871, 905), 'PIL.Image.open', 'Image.open', (["(img_dir + 'google.png')"], {}), "(img_dir + 'google.png')\n", (881, 905), False, 'from PIL import Image\n'), ((1027, 1043), 'numpy.array', 'np.array', (['logo_r'], {}), '(logo_r)\n', (1035, 1043), True, 'import numpy as np\n'), ((1052, 1068), 'numpy.array', 'np.array', (['logo_g'], {}), '(logo_g)\n', (1060, 1068), True, 'import numpy as np\n'), ((1077, 1093), 'numpy.array', 'np.array', (['logo_b'], {}), '(logo_b)\n', (1085, 1093), True, 'import numpy as np\n'), ((1102, 1118), 'numpy.array', 'np.array', (['logo_a'], {}), '(logo_a)\n', (1110, 1118), True, 'import numpy as np\n'), ((1281, 1303), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_r'], {}), '(arr_r)\n', (1296, 1303), False, 'from PIL import Image\n'), ((1314, 1336), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_g'], {}), '(arr_g)\n', (1329, 1336), False, 'from PIL import Image\n'), ((1347, 1369), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_b'], {}), '(arr_b)\n', (1362, 1369), False, 'from PIL import Image\n'), ((1380, 1402), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_a'], {}), '(arr_a)\n', (1395, 1402), False, 'from PIL import Image\n'), ((1722, 1740), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1738, 1740), True, 'from matplotlib import pyplot as plt\n'), ((1741, 1751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1749, 1751), True, 'from matplotlib import pyplot as plt\n'), ((200, 214), 'numpy.array', 'np.array', (['img0'], {}), '(img0)\n', (208, 214), True, 'import numpy as np\n'), ((661, 685), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(3)', '(i + 1)'], {}), '(4, 3, i + 1)\n', (672, 685), True, 'from matplotlib import pyplot as plt\n'), ((690, 705), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (698, 705), True, 'from matplotlib import pyplot as plt\n'), ((1445, 1469), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(3)', '(i + 9)'], {}), '(4, 3, i + 9)\n', (1456, 1469), True, 'from matplotlib import pyplot as plt\n'), ((1472, 1487), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1480, 1487), True, 'from matplotlib import pyplot as plt\n'), ((1618, 1675), 'PIL.Image.merge', 'Image.merge', (['"""RGBA"""', '[shard_r, shard_g, shard_b, shard_a]'], {}), "('RGBA', [shard_r, shard_g, shard_b, shard_a])\n", (1629, 1675), False, 'from PIL import Image\n')] |
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.debug import have_debug_prints
from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin
from rpython.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder
from rpython.jit.backend.x86.regloc import LocationCodeBuilder
from rpython.jit.backend.x86.arch import IS_X86_32, IS_X86_64, WORD
from rpython.jit.backend.x86 import valgrind
# XXX: Seems nasty to change the superclass of MachineCodeBlockWrapper
# like this
if IS_X86_32:
codebuilder_cls = X86_32_CodeBuilder
backend_name = 'x86'
elif IS_X86_64:
codebuilder_cls = X86_64_CodeBuilder
backend_name = 'x86_64'
class MachineCodeBlockWrapper(BlockBuilderMixin,
LocationCodeBuilder,
codebuilder_cls):
def __init__(self):
self.init_block_builder()
# a list of relative positions; for each position p, the bytes
# at [p-4:p] encode an absolute address that will need to be
# made relative. Only works on 32-bit!
if WORD == 4:
self.relocations = []
else:
self.relocations = None
#
# ResOperation --> offset in the assembly.
# ops_offset[None] represents the beginning of the code after the last op
# (i.e., the tail of the loop)
self.ops_offset = {}
def add_pending_relocation(self):
self.relocations.append(self.get_relative_pos())
def mark_op(self, op):
pos = self.get_relative_pos()
self.ops_offset[op] = pos
def copy_to_raw_memory(self, addr):
self._copy_to_raw_memory(addr)
if self.relocations is not None:
for reloc in self.relocations:
p = addr + reloc
adr = rffi.cast(rffi.LONGP, p - WORD)
adr[0] = intmask(adr[0] - p)
valgrind.discard_translations(addr, self.get_relative_pos())
self._dump(addr, "jit-backend-dump", backend_name)
| [
"rpython.rlib.rarithmetic.intmask",
"rpython.rtyper.lltypesystem.rffi.cast"
] | [((1918, 1949), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['rffi.LONGP', '(p - WORD)'], {}), '(rffi.LONGP, p - WORD)\n', (1927, 1949), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((1975, 1994), 'rpython.rlib.rarithmetic.intmask', 'intmask', (['(adr[0] - p)'], {}), '(adr[0] - p)\n', (1982, 1994), False, 'from rpython.rlib.rarithmetic import intmask\n')] |
from typing import List, Literal, Optional, Sequence
from pydantic import Field, root_validator, validator
from pydantic.main import BaseModel
from weaverbird.pipeline.steps.utils.base import BaseStep
from weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin
from weaverbird.pipeline.steps.utils.validation import validate_unique_columns
from weaverbird.pipeline.types import ColumnName, PopulatedWithFieldnames, TemplatedVariable
AggregateFn = Literal[
'avg',
'sum',
'min',
'max',
'count',
'count distinct',
'first',
'last',
'count distinct including empty',
]
class Aggregation(BaseModel):
class Config(PopulatedWithFieldnames):
...
new_columns: List[ColumnName] = Field(alias='newcolumns')
agg_function: AggregateFn = Field(alias='aggfunction')
columns: List[ColumnName]
@validator('columns', pre=True)
def validate_unique_columns(cls, value):
return validate_unique_columns(value)
@root_validator(pre=True)
def handle_legacy_syntax(cls, values):
if 'column' in values:
values['columns'] = [values.pop('column')]
if 'newcolumn' in values:
values['new_columns'] = [values.pop('newcolumn')]
return values
class AggregateStep(BaseStep):
name = Field('aggregate', const=True)
on: List[ColumnName] = []
aggregations: Sequence[Aggregation]
keep_original_granularity: Optional[bool] = Field(
default=False, alias='keepOriginalGranularity'
)
class Config(PopulatedWithFieldnames):
...
class AggregationWithVariables(Aggregation):
class Config(PopulatedWithFieldnames):
...
new_columns: List[TemplatedVariable] = Field(alias='newcolumns')
agg_function: TemplatedVariable = Field(alias='aggfunction')
columns: List[TemplatedVariable]
class AggregateStepWithVariables(AggregateStep, StepWithVariablesMixin):
aggregations: Sequence[AggregationWithVariables]
| [
"pydantic.Field",
"weaverbird.pipeline.steps.utils.validation.validate_unique_columns",
"pydantic.root_validator",
"pydantic.validator"
] | [((751, 776), 'pydantic.Field', 'Field', ([], {'alias': '"""newcolumns"""'}), "(alias='newcolumns')\n", (756, 776), False, 'from pydantic import Field, root_validator, validator\n'), ((809, 835), 'pydantic.Field', 'Field', ([], {'alias': '"""aggfunction"""'}), "(alias='aggfunction')\n", (814, 835), False, 'from pydantic import Field, root_validator, validator\n'), ((872, 902), 'pydantic.validator', 'validator', (['"""columns"""'], {'pre': '(True)'}), "('columns', pre=True)\n", (881, 902), False, 'from pydantic import Field, root_validator, validator\n'), ((1000, 1024), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1014, 1024), False, 'from pydantic import Field, root_validator, validator\n'), ((1316, 1346), 'pydantic.Field', 'Field', (['"""aggregate"""'], {'const': '(True)'}), "('aggregate', const=True)\n", (1321, 1346), False, 'from pydantic import Field, root_validator, validator\n'), ((1465, 1518), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'alias': '"""keepOriginalGranularity"""'}), "(default=False, alias='keepOriginalGranularity')\n", (1470, 1518), False, 'from pydantic import Field, root_validator, validator\n'), ((1735, 1760), 'pydantic.Field', 'Field', ([], {'alias': '"""newcolumns"""'}), "(alias='newcolumns')\n", (1740, 1760), False, 'from pydantic import Field, root_validator, validator\n'), ((1799, 1825), 'pydantic.Field', 'Field', ([], {'alias': '"""aggfunction"""'}), "(alias='aggfunction')\n", (1804, 1825), False, 'from pydantic import Field, root_validator, validator\n'), ((963, 993), 'weaverbird.pipeline.steps.utils.validation.validate_unique_columns', 'validate_unique_columns', (['value'], {}), '(value)\n', (986, 993), False, 'from weaverbird.pipeline.steps.utils.validation import validate_unique_columns\n')] |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
from frappe.model.meta import get_field_precision
from erpnext.accounts.utils import validate_expense_against_budget
class StockAccountInvalidTransaction(frappe.ValidationError): pass
def make_gl_entries(gl_map, cancel=False, adv_adj=False, merge_entries=True, update_outstanding='Yes'):
if gl_map:
if not cancel:
gl_map = process_gl_map(gl_map, merge_entries)
if gl_map and len(gl_map) > 1:
save_entries(gl_map, adv_adj, update_outstanding)
else:
frappe.throw(_("Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."))
else:
delete_gl_entries(gl_map, adv_adj=adv_adj, update_outstanding=update_outstanding)
def process_gl_map(gl_map, merge_entries=True):
if merge_entries:
gl_map = merge_similar_entries(gl_map)
for entry in gl_map:
# toggle debit, credit if negative entry
if flt(entry.debit) < 0:
entry.credit = flt(entry.credit) - flt(entry.debit)
entry.debit = 0.0
if flt(entry.debit_in_account_currency) < 0:
entry.credit_in_account_currency = \
flt(entry.credit_in_account_currency) - flt(entry.debit_in_account_currency)
entry.debit_in_account_currency = 0.0
if flt(entry.credit) < 0:
entry.debit = flt(entry.debit) - flt(entry.credit)
entry.credit = 0.0
if flt(entry.credit_in_account_currency) < 0:
entry.debit_in_account_currency = \
flt(entry.debit_in_account_currency) - flt(entry.credit_in_account_currency)
entry.credit_in_account_currency = 0.0
return gl_map
def merge_similar_entries(gl_map):
merged_gl_map = []
for entry in gl_map:
# if there is already an entry in this account then just add it
# to that entry
same_head = check_if_in_list(entry, merged_gl_map)
if same_head:
same_head.debit = flt(same_head.debit) + flt(entry.debit)
same_head.debit_in_account_currency = \
flt(same_head.debit_in_account_currency) + flt(entry.debit_in_account_currency)
same_head.credit = flt(same_head.credit) + flt(entry.credit)
same_head.credit_in_account_currency = \
flt(same_head.credit_in_account_currency) + flt(entry.credit_in_account_currency)
else:
merged_gl_map.append(entry)
# filter zero debit and credit entries
merged_gl_map = filter(lambda x: flt(x.debit, 9)!=0 or flt(x.credit, 9)!=0, merged_gl_map)
return merged_gl_map
def check_if_in_list(gle, gl_map):
for e in gl_map:
if e.account == gle.account \
and cstr(e.get('party_type'))==cstr(gle.get('party_type')) \
and cstr(e.get('party'))==cstr(gle.get('party')) \
and cstr(e.get('against_voucher'))==cstr(gle.get('against_voucher')) \
and cstr(e.get('against_voucher_type')) == cstr(gle.get('against_voucher_type')) \
and cstr(e.get('cost_center')) == cstr(gle.get('cost_center')):
return e
def save_entries(gl_map, adv_adj, update_outstanding):
validate_account_for_auto_accounting_for_stock(gl_map)
round_off_debit_credit(gl_map)
for entry in gl_map:
make_entry(entry, adv_adj, update_outstanding)
# check against budget
validate_expense_against_budget(entry)
def make_entry(args, adv_adj, update_outstanding):
args.update({"doctype": "GL Entry"})
gle = frappe.get_doc(args)
gle.flags.ignore_permissions = 1
gle.insert()
gle.run_method("on_update_with_args", adv_adj, update_outstanding)
gle.submit()
def validate_account_for_auto_accounting_for_stock(gl_map):
if cint(frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock")) \
and gl_map[0].voucher_type=="Journal Entry":
aii_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Warehouse' and (warehouse != '' and warehouse is not null)""")]
for entry in gl_map:
if entry.account in aii_accounts:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(entry.account), StockAccountInvalidTransaction)
def round_off_debit_credit(gl_map):
precision = get_field_precision(frappe.get_meta("GL Entry").get_field("debit"),
currency=frappe.db.get_value("Company", gl_map[0].company, "default_currency", cache=True))
debit_credit_diff = 0.0
for entry in gl_map:
entry.debit = flt(entry.debit, precision)
entry.credit = flt(entry.credit, precision)
debit_credit_diff += entry.debit - entry.credit
debit_credit_diff = flt(debit_credit_diff, precision)
if abs(debit_credit_diff) >= (5.0 / (10**precision)):
frappe.throw(_("Debit and Credit not equal for {0} #{1}. Difference is {2}.")
.format(gl_map[0].voucher_type, gl_map[0].voucher_no, debit_credit_diff))
elif abs(debit_credit_diff) >= (1.0 / (10**precision)):
make_round_off_gle(gl_map, debit_credit_diff)
def make_round_off_gle(gl_map, debit_credit_diff):
round_off_account, round_off_cost_center = frappe.db.get_value("Company", gl_map[0].company,
["round_off_account", "round_off_cost_center"]) or [None, None]
if not round_off_account:
frappe.throw(_("Please mention Round Off Account in Company"))
if not round_off_cost_center:
frappe.throw(_("Please mention Round Off Cost Center in Company"))
round_off_gle = frappe._dict()
for k in ["voucher_type", "voucher_no", "company",
"posting_date", "remarks", "fiscal_year", "is_opening"]:
round_off_gle[k] = gl_map[0][k]
round_off_gle.update({
"account": round_off_account,
"debit_in_account_currency": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit_in_account_currency": debit_credit_diff if debit_credit_diff > 0 else 0,
"debit": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit": debit_credit_diff if debit_credit_diff > 0 else 0,
"cost_center": round_off_cost_center,
"party_type": None,
"party": None,
"against_voucher_type": None,
"against_voucher": None
})
gl_map.append(round_off_gle)
def delete_gl_entries(gl_entries=None, voucher_type=None, voucher_no=None,
adv_adj=False, update_outstanding="Yes"):
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, \
check_freezing_date, update_outstanding_amt, validate_frozen_account
if not gl_entries:
gl_entries = frappe.db.sql("""select * from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no), as_dict=True)
if gl_entries:
check_freezing_date(gl_entries[0]["posting_date"], adv_adj)
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""",
(voucher_type or gl_entries[0]["voucher_type"], voucher_no or gl_entries[0]["voucher_no"]))
for entry in gl_entries:
validate_frozen_account(entry["account"], adv_adj)
validate_balance_type(entry["account"], adv_adj)
validate_expense_against_budget(entry)
if entry.get("against_voucher") and update_outstanding == 'Yes':
update_outstanding_amt(entry["account"], entry.get("party_type"), entry.get("party"), entry.get("against_voucher_type"),
entry.get("against_voucher"), on_cancel=True)
| [
"frappe._dict",
"frappe.db.get_value",
"frappe.db.get_single_value",
"frappe._",
"erpnext.accounts.doctype.gl_entry.gl_entry.validate_balance_type",
"erpnext.accounts.utils.validate_expense_against_budget",
"frappe.db.sql",
"frappe.get_doc",
"erpnext.accounts.doctype.gl_entry.gl_entry.validate_froze... | [((3387, 3407), 'frappe.get_doc', 'frappe.get_doc', (['args'], {}), '(args)\n', (3401, 3407), False, 'import frappe\n'), ((4529, 4562), 'frappe.utils.flt', 'flt', (['debit_credit_diff', 'precision'], {}), '(debit_credit_diff, precision)\n', (4532, 4562), False, 'from frappe.utils import flt, cstr, cint\n'), ((5305, 5319), 'frappe._dict', 'frappe._dict', ([], {}), '()\n', (5317, 5319), False, 'import frappe\n'), ((6519, 6708), 'frappe.db.sql', 'frappe.db.sql', (['"""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s"""', "(voucher_type or gl_entries[0]['voucher_type'], voucher_no or gl_entries[0]\n ['voucher_no'])"], {}), "(\n 'delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s', (\n voucher_type or gl_entries[0]['voucher_type'], voucher_no or gl_entries\n [0]['voucher_no']))\n", (6532, 6708), False, 'import frappe\n'), ((3251, 3289), 'erpnext.accounts.utils.validate_expense_against_budget', 'validate_expense_against_budget', (['entry'], {}), '(entry)\n', (3282, 3289), False, 'from erpnext.accounts.utils import validate_expense_against_budget\n'), ((4383, 4410), 'frappe.utils.flt', 'flt', (['entry.debit', 'precision'], {}), '(entry.debit, precision)\n', (4386, 4410), False, 'from frappe.utils import flt, cstr, cint\n'), ((4428, 4456), 'frappe.utils.flt', 'flt', (['entry.credit', 'precision'], {}), '(entry.credit, precision)\n', (4431, 4456), False, 'from frappe.utils import flt, cstr, cint\n'), ((4977, 5078), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Company"""', 'gl_map[0].company', "['round_off_account', 'round_off_cost_center']"], {}), "('Company', gl_map[0].company, ['round_off_account',\n 'round_off_cost_center'])\n", (4996, 5078), False, 'import frappe\n'), ((6305, 6448), 'frappe.db.sql', 'frappe.db.sql', (['"""select * from `tabGL Entry`\n\t\t\twhere voucher_type=%s and voucher_no=%s"""', '(voucher_type, voucher_no)'], {'as_dict': '(True)'}), '(\n """select * from `tabGL Entry`\n\t\t\twhere voucher_type=%s and voucher_no=%s"""\n , (voucher_type, voucher_no), as_dict=True)\n', (6318, 6448), False, 'import frappe\n'), ((6457, 6516), 'erpnext.accounts.doctype.gl_entry.gl_entry.check_freezing_date', 'check_freezing_date', (["gl_entries[0]['posting_date']", 'adv_adj'], {}), "(gl_entries[0]['posting_date'], adv_adj)\n", (6476, 6516), False, 'from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, check_freezing_date, update_outstanding_amt, validate_frozen_account\n'), ((6729, 6779), 'erpnext.accounts.doctype.gl_entry.gl_entry.validate_frozen_account', 'validate_frozen_account', (["entry['account']", 'adv_adj'], {}), "(entry['account'], adv_adj)\n", (6752, 6779), False, 'from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, check_freezing_date, update_outstanding_amt, validate_frozen_account\n'), ((6782, 6830), 'erpnext.accounts.doctype.gl_entry.gl_entry.validate_balance_type', 'validate_balance_type', (["entry['account']", 'adv_adj'], {}), "(entry['account'], adv_adj)\n", (6803, 6830), False, 'from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, check_freezing_date, update_outstanding_amt, validate_frozen_account\n'), ((6833, 6871), 'erpnext.accounts.utils.validate_expense_against_budget', 'validate_expense_against_budget', (['entry'], {}), '(entry)\n', (6864, 6871), False, 'from erpnext.accounts.utils import validate_expense_against_budget\n'), ((1117, 1133), 'frappe.utils.flt', 'flt', (['entry.debit'], {}), '(entry.debit)\n', (1120, 1133), False, 'from frappe.utils import flt, cstr, cint\n'), ((1221, 1257), 'frappe.utils.flt', 'flt', (['entry.debit_in_account_currency'], {}), '(entry.debit_in_account_currency)\n', (1224, 1257), False, 'from frappe.utils import flt, cstr, cint\n'), ((1431, 1448), 'frappe.utils.flt', 'flt', (['entry.credit'], {}), '(entry.credit)\n', (1434, 1448), False, 'from frappe.utils import flt, cstr, cint\n'), ((1536, 1573), 'frappe.utils.flt', 'flt', (['entry.credit_in_account_currency'], {}), '(entry.credit_in_account_currency)\n', (1539, 1573), False, 'from frappe.utils import flt, cstr, cint\n'), ((3608, 3684), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Accounts Settings"""', '"""auto_accounting_for_stock"""'], {}), "('Accounts Settings', 'auto_accounting_for_stock')\n", (3634, 3684), False, 'import frappe\n'), ((4236, 4322), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Company"""', 'gl_map[0].company', '"""default_currency"""'], {'cache': '(True)'}), "('Company', gl_map[0].company, 'default_currency', cache\n =True)\n", (4255, 4322), False, 'import frappe\n'), ((5135, 5183), 'frappe._', '_', (['"""Please mention Round Off Account in Company"""'], {}), "('Please mention Round Off Account in Company')\n", (5136, 5183), False, 'from frappe import _\n'), ((5232, 5284), 'frappe._', '_', (['"""Please mention Round Off Cost Center in Company"""'], {}), "('Please mention Round Off Cost Center in Company')\n", (5233, 5284), False, 'from frappe import _\n'), ((1157, 1174), 'frappe.utils.flt', 'flt', (['entry.credit'], {}), '(entry.credit)\n', (1160, 1174), False, 'from frappe.utils import flt, cstr, cint\n'), ((1177, 1193), 'frappe.utils.flt', 'flt', (['entry.debit'], {}), '(entry.debit)\n', (1180, 1193), False, 'from frappe.utils import flt, cstr, cint\n'), ((1307, 1344), 'frappe.utils.flt', 'flt', (['entry.credit_in_account_currency'], {}), '(entry.credit_in_account_currency)\n', (1310, 1344), False, 'from frappe.utils import flt, cstr, cint\n'), ((1347, 1383), 'frappe.utils.flt', 'flt', (['entry.debit_in_account_currency'], {}), '(entry.debit_in_account_currency)\n', (1350, 1383), False, 'from frappe.utils import flt, cstr, cint\n'), ((1471, 1487), 'frappe.utils.flt', 'flt', (['entry.debit'], {}), '(entry.debit)\n', (1474, 1487), False, 'from frappe.utils import flt, cstr, cint\n'), ((1490, 1507), 'frappe.utils.flt', 'flt', (['entry.credit'], {}), '(entry.credit)\n', (1493, 1507), False, 'from frappe.utils import flt, cstr, cint\n'), ((1622, 1658), 'frappe.utils.flt', 'flt', (['entry.debit_in_account_currency'], {}), '(entry.debit_in_account_currency)\n', (1625, 1658), False, 'from frappe.utils import flt, cstr, cint\n'), ((1661, 1698), 'frappe.utils.flt', 'flt', (['entry.credit_in_account_currency'], {}), '(entry.credit_in_account_currency)\n', (1664, 1698), False, 'from frappe.utils import flt, cstr, cint\n'), ((2009, 2029), 'frappe.utils.flt', 'flt', (['same_head.debit'], {}), '(same_head.debit)\n', (2012, 2029), False, 'from frappe.utils import flt, cstr, cint\n'), ((2032, 2048), 'frappe.utils.flt', 'flt', (['entry.debit'], {}), '(entry.debit)\n', (2035, 2048), False, 'from frappe.utils import flt, cstr, cint\n'), ((2096, 2136), 'frappe.utils.flt', 'flt', (['same_head.debit_in_account_currency'], {}), '(same_head.debit_in_account_currency)\n', (2099, 2136), False, 'from frappe.utils import flt, cstr, cint\n'), ((2139, 2175), 'frappe.utils.flt', 'flt', (['entry.debit_in_account_currency'], {}), '(entry.debit_in_account_currency)\n', (2142, 2175), False, 'from frappe.utils import flt, cstr, cint\n'), ((2198, 2219), 'frappe.utils.flt', 'flt', (['same_head.credit'], {}), '(same_head.credit)\n', (2201, 2219), False, 'from frappe.utils import flt, cstr, cint\n'), ((2222, 2239), 'frappe.utils.flt', 'flt', (['entry.credit'], {}), '(entry.credit)\n', (2225, 2239), False, 'from frappe.utils import flt, cstr, cint\n'), ((2288, 2329), 'frappe.utils.flt', 'flt', (['same_head.credit_in_account_currency'], {}), '(same_head.credit_in_account_currency)\n', (2291, 2329), False, 'from frappe.utils import flt, cstr, cint\n'), ((2332, 2369), 'frappe.utils.flt', 'flt', (['entry.credit_in_account_currency'], {}), '(entry.credit_in_account_currency)\n', (2335, 2369), False, 'from frappe.utils import flt, cstr, cint\n'), ((3768, 3911), 'frappe.db.sql', 'frappe.db.sql', (['"""select name from tabAccount\n\t\t\t\twhere account_type = \'Warehouse\' and (warehouse != \'\' and warehouse is not null)"""'], {}), '(\n """select name from tabAccount\n\t\t\t\twhere account_type = \'Warehouse\' and (warehouse != \'\' and warehouse is not null)"""\n )\n', (3781, 3911), False, 'import frappe\n'), ((4177, 4204), 'frappe.get_meta', 'frappe.get_meta', (['"""GL Entry"""'], {}), "('GL Entry')\n", (4192, 4204), False, 'import frappe\n'), ((728, 847), 'frappe._', '_', (['"""Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."""'], {}), "('Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction.'\n )\n", (729, 847), False, 'from frappe import _\n'), ((2484, 2499), 'frappe.utils.flt', 'flt', (['x.debit', '(9)'], {}), '(x.debit, 9)\n', (2487, 2499), False, 'from frappe.utils import flt, cstr, cint\n'), ((2506, 2522), 'frappe.utils.flt', 'flt', (['x.credit', '(9)'], {}), '(x.credit, 9)\n', (2509, 2522), False, 'from frappe.utils import flt, cstr, cint\n'), ((4633, 4697), 'frappe._', '_', (['"""Debit and Credit not equal for {0} #{1}. Difference is {2}."""'], {}), "('Debit and Credit not equal for {0} #{1}. Difference is {2}.')\n", (4634, 4697), False, 'from frappe import _\n'), ((3984, 4044), 'frappe._', '_', (['"""Account: {0} can only be updated via Stock Transactions"""'], {}), "('Account: {0} can only be updated via Stock Transactions')\n", (3985, 4044), False, 'from frappe import _\n')] |
import pygame
import inspect
from .bullet import Bullet
from .player import Player
PRESSED = {
pygame.K_UP: 'up',
pygame.K_DOWN: 'down',
pygame.K_LEFT: 'left',
pygame.K_RIGHT: 'right',
}
DOWN = {
pygame.K_LSHIFT: 'shift',
}
UP = {
pygame.K_LSHIFT: 'unshift',
}
class Game:
def __init__(self):
self.screen = pygame.display.set_mode((640, 480))
self.tasks = []
self.scripts = []
self.objects = pygame.sprite.Group()
self.bullets = pygame.sprite.Group()
self.player = None
def run(self):
"""
Start the game
:return:
"""
clock = pygame.time.Clock()
while True:
clock.tick(60)
self.screen.fill((255, 255, 255))
# self.objects.clear(self.screen)
self.objects.draw(self.screen)
for task in list(self.tasks):
try:
while True:
result = task[-1].__next__()
if inspect.isgenerator(result):
task.append(result)
else:
break
except StopIteration:
task.pop()
except IndexError:
self.tasks.remove(task)
if not self.tasks and self.scripts:
self.tasks.append([self.scripts.pop()])
keys = pygame.key.get_pressed()
events = pygame.event.get()
for obj in self.objects:
for event in events:
if event.type == pygame.KEYDOWN and event.key in DOWN and hasattr(obj, DOWN[event.key]):
obj.__getattribute__(DOWN[event.key])()
elif event.type == pygame.KEYUP and event.key in UP and hasattr(obj, UP[event.key]):
obj.__getattribute__(UP[event.key])()
for k, v in PRESSED.items():
if keys[k] and hasattr(obj, v):
obj.__getattribute__(v)()
if keys[pygame.K_q]:
break
for obj in list(self.objects.sprites()):
if hasattr(obj, 'step'):
obj.step()
if hasattr(obj, 'should_remove') and obj.should_remove:
self.objects.remove(obj)
pygame.display.update()
def add_task(self, task):
"""
Adds a task to be run.
:param task:
:return:
"""
if not inspect.isgenerator(task):
raise TypeError('Object must be a generator')
self.tasks.append(task)
def set_player(self, player):
"""
Sets the player object
:param player:
:return:
"""
if not isinstance(player, Player):
raise TypeError("Object must be Player")
if self.player:
self.player.kill()
self.player = player
self.objects.add(player)
def add_bullet(self, bullet):
"""
Adds a bullet onto the screen
:param bullet:
:return:
"""
if not isinstance(bullet, Bullet):
raise TypeError("Object must be a Bullet")
self.objects.add(bullet)
self.bullets.add(bullet)
return bullet
def add_bullets(self, *bullets):
"""
Adds multiple bullets
:param bullets:
:return:
"""
return [self.add_bullet(b) for b in bullets]
def add_script(self, module):
"""
Adds a script to the end of the script queue
:param module:
:return:
"""
module = __import__(module, fromlist=['*'])
task = module.main(self)
self.scripts.insert(0, task)
return task
| [
"pygame.event.get",
"pygame.sprite.Group",
"pygame.display.set_mode",
"inspect.isgenerator",
"pygame.key.get_pressed",
"pygame.time.Clock",
"pygame.display.update"
] | [((350, 385), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(640, 480)'], {}), '((640, 480))\n', (373, 385), False, 'import pygame\n'), ((459, 480), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (478, 480), False, 'import pygame\n'), ((504, 525), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (523, 525), False, 'import pygame\n'), ((653, 672), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (670, 672), False, 'import pygame\n'), ((1444, 1468), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (1466, 1468), False, 'import pygame\n'), ((1490, 1508), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1506, 1508), False, 'import pygame\n'), ((2383, 2406), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2404, 2406), False, 'import pygame\n'), ((2546, 2571), 'inspect.isgenerator', 'inspect.isgenerator', (['task'], {}), '(task)\n', (2565, 2571), False, 'import inspect\n'), ((1030, 1057), 'inspect.isgenerator', 'inspect.isgenerator', (['result'], {}), '(result)\n', (1049, 1057), False, 'import inspect\n')] |
import numpy as np
import pandas as pd
import pytest
from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2
from inz.utils import chi2, select_k_best, split, train_test_split
def test_split_list_int():
ints = list(range(7))
want = [[0, 1, 2], [3, 4, 5], [6]]
get = list(split(ints, 3))
assert len(get) == len(want)
assert get == want
def test_split_int():
ints = range(7)
want = [[0, 1, 2], [3, 4, 5], [6]]
get = list(split(ints, 3))
assert len(get) == len(want)
assert get == want
def test_split_list_int_greater_width():
ints = list(range(3))
want = [[0, 1, 2]]
get = list(split(ints, 4))
assert len(get) == len(want)
assert get == want
def test_split_list_str():
strings = list(map(str, range(6)))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2))
assert len(get) == len(want)
assert get == want
def test_str():
string = ''.join(map(str, range(6)))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(string, 2))
assert len(get) == len(want)
assert get == want
def test_split_ndarray_int():
array = np.arange(10, dtype=int).reshape(-1, 2)
want = [np.array([[0, 1], [2, 3]]),
np.array([[4, 5], [6, 7]]),
np.array([[8, 9]])]
get = list(split(array, 2))
assert len(get) == len(want)
for i, j in zip(get, want):
assert type(i) == type(j)
assert np.array_equal(i, j)
def test_split_generator_str():
strings = map(str, range(6))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2))
assert len(get) == len(want)
assert get == want
def test_split_list_int_not_allow():
ints = list(range(7))
want = [[0, 1, 2], [3, 4, 5]]
get = list(split(ints, 3, False))
assert len(get) == len(want)
assert get == want
def test_split_list_int_greater_width_not_allow():
ints = list(range(3))
want = []
get = list(split(ints, 4, False))
assert len(get) == len(want)
assert get == want
def test_split_list_str_not_allow():
strings = list(map(str, range(6)))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2, False))
assert len(get) == len(want)
assert get == want
def test_split_ndarray_int_not_allow():
array = np.arange(10, dtype=int).reshape(-1, 2)
want = [np.array([[0, 1], [2, 3]]),
np.array([[4, 5], [6, 7]])]
get = list(split(array, 2, False))
assert len(get) == len(want)
for i, j in zip(get, want):
assert type(i) == type(j)
assert np.array_equal(i, j)
def test_split_generator_str_not_allow():
strings = map(str, range(6))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2, False))
assert len(get) == len(want)
assert get == want
@pytest.fixture
def data():
X = pd.read_csv('../../data/data.csv')
y = X.pop('Choroba')
return X.values, y.values
def test_chi2(data):
X, y = data
sk_val, _ = sk_chi2(X, y)
my_val = chi2(X, y)
np.testing.assert_equal(sk_val, my_val)
def test_select_k_best(data):
X, y = data
for i in range(1, 31):
sk_sup1 = SelectKBest(sk_chi2, i).fit(X, y).get_support()
sk_sup2 = SelectKBest(sk_chi2, i).fit(X, y).get_support(True)
my_sup1 = select_k_best(X, y, k=i)
my_sup2 = select_k_best(X, y, k=i, indices=True)
np.testing.assert_equal(sk_sup1, my_sup1, str(i))
np.testing.assert_equal(sk_sup2, sorted(my_sup2), str(i))
def test_train_test_split():
x = np.arange(10)
get = train_test_split(x, shuffle=False)
want = [np.arange(7), np.arange(7, 10)]
for i in zip(get, want):
np.testing.assert_equal(*i)
def test_train_test_split5():
x = np.arange(10)
get = train_test_split(x, test_size=.5, shuffle=False)
want = [np.arange(5), np.arange(5, 10)]
for i in zip(get, want):
np.testing.assert_equal(*i)
if __name__ == '__main__':
pytest.main()
| [
"inz.utils.train_test_split",
"inz.utils.split",
"numpy.testing.assert_equal",
"pandas.read_csv",
"inz.utils.select_k_best",
"pytest.main",
"sklearn.feature_selection.SelectKBest",
"numpy.array",
"inz.utils.chi2",
"numpy.array_equal",
"sklearn.feature_selection.chi2",
"numpy.arange"
] | [((2899, 2933), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/data.csv"""'], {}), "('../../data/data.csv')\n", (2910, 2933), True, 'import pandas as pd\n'), ((3044, 3057), 'sklearn.feature_selection.chi2', 'sk_chi2', (['X', 'y'], {}), '(X, y)\n', (3051, 3057), True, 'from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n'), ((3071, 3081), 'inz.utils.chi2', 'chi2', (['X', 'y'], {}), '(X, y)\n', (3075, 3081), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3087, 3126), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['sk_val', 'my_val'], {}), '(sk_val, my_val)\n', (3110, 3126), True, 'import numpy as np\n'), ((3603, 3616), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3612, 3616), True, 'import numpy as np\n'), ((3627, 3661), 'inz.utils.train_test_split', 'train_test_split', (['x'], {'shuffle': '(False)'}), '(x, shuffle=False)\n', (3643, 3661), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3811, 3824), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3820, 3824), True, 'import numpy as np\n'), ((3835, 3884), 'inz.utils.train_test_split', 'train_test_split', (['x'], {'test_size': '(0.5)', 'shuffle': '(False)'}), '(x, test_size=0.5, shuffle=False)\n', (3851, 3884), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((4026, 4039), 'pytest.main', 'pytest.main', ([], {}), '()\n', (4037, 4039), False, 'import pytest\n'), ((297, 311), 'inz.utils.split', 'split', (['ints', '(3)'], {}), '(ints, 3)\n', (302, 311), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((467, 481), 'inz.utils.split', 'split', (['ints', '(3)'], {}), '(ints, 3)\n', (472, 481), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((646, 660), 'inz.utils.split', 'split', (['ints', '(4)'], {}), '(ints, 4)\n', (651, 660), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((849, 866), 'inz.utils.split', 'split', (['strings', '(2)'], {}), '(strings, 2)\n', (854, 866), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1046, 1062), 'inz.utils.split', 'split', (['string', '(2)'], {}), '(string, 2)\n', (1051, 1062), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1216, 1242), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (1224, 1242), True, 'import numpy as np\n'), ((1256, 1282), 'numpy.array', 'np.array', (['[[4, 5], [6, 7]]'], {}), '([[4, 5], [6, 7]])\n', (1264, 1282), True, 'import numpy as np\n'), ((1296, 1314), 'numpy.array', 'np.array', (['[[8, 9]]'], {}), '([[8, 9]])\n', (1304, 1314), True, 'import numpy as np\n'), ((1331, 1346), 'inz.utils.split', 'split', (['array', '(2)'], {}), '(array, 2)\n', (1336, 1346), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1462, 1482), 'numpy.array_equal', 'np.array_equal', (['i', 'j'], {}), '(i, j)\n', (1476, 1482), True, 'import numpy as np\n'), ((1613, 1630), 'inz.utils.split', 'split', (['strings', '(2)'], {}), '(strings, 2)\n', (1618, 1630), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1802, 1823), 'inz.utils.split', 'split', (['ints', '(3)', '(False)'], {}), '(ints, 3, False)\n', (1807, 1823), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1989, 2010), 'inz.utils.split', 'split', (['ints', '(4)', '(False)'], {}), '(ints, 4, False)\n', (1994, 2010), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((2209, 2233), 'inz.utils.split', 'split', (['strings', '(2)', '(False)'], {}), '(strings, 2, False)\n', (2214, 2233), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((2397, 2423), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (2405, 2423), True, 'import numpy as np\n'), ((2437, 2463), 'numpy.array', 'np.array', (['[[4, 5], [6, 7]]'], {}), '([[4, 5], [6, 7]])\n', (2445, 2463), True, 'import numpy as np\n'), ((2480, 2502), 'inz.utils.split', 'split', (['array', '(2)', '(False)'], {}), '(array, 2, False)\n', (2485, 2502), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((2618, 2638), 'numpy.array_equal', 'np.array_equal', (['i', 'j'], {}), '(i, j)\n', (2632, 2638), True, 'import numpy as np\n'), ((2779, 2803), 'inz.utils.split', 'split', (['strings', '(2)', '(False)'], {}), '(strings, 2, False)\n', (2784, 2803), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3357, 3381), 'inz.utils.select_k_best', 'select_k_best', (['X', 'y'], {'k': 'i'}), '(X, y, k=i)\n', (3370, 3381), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3400, 3438), 'inz.utils.select_k_best', 'select_k_best', (['X', 'y'], {'k': 'i', 'indices': '(True)'}), '(X, y, k=i, indices=True)\n', (3413, 3438), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3674, 3686), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (3683, 3686), True, 'import numpy as np\n'), ((3688, 3704), 'numpy.arange', 'np.arange', (['(7)', '(10)'], {}), '(7, 10)\n', (3697, 3704), True, 'import numpy as np\n'), ((3743, 3770), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['*i'], {}), '(*i)\n', (3766, 3770), True, 'import numpy as np\n'), ((3896, 3908), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3905, 3908), True, 'import numpy as np\n'), ((3910, 3926), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (3919, 3926), True, 'import numpy as np\n'), ((3965, 3992), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['*i'], {}), '(*i)\n', (3988, 3992), True, 'import numpy as np\n'), ((1164, 1188), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (1173, 1188), True, 'import numpy as np\n'), ((2345, 2369), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (2354, 2369), True, 'import numpy as np\n'), ((3220, 3243), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['sk_chi2', 'i'], {}), '(sk_chi2, i)\n', (3231, 3243), False, 'from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n'), ((3286, 3309), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['sk_chi2', 'i'], {}), '(sk_chi2, i)\n', (3297, 3309), False, 'from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n')] |
from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1BDM(TestCase):
def setUp(self):
self.maxDiff = None
def test_michelson_parse_code_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_code_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_code_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_storage_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_storage_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_storage_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooMDoN(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooMDoN(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooMDoN(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooT7Uy(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooT7Uy(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooT7Uy(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onuB3S(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onuB3S(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onuB3S(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooArSr(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooArSr(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooArSr(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onrCFo(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onrCFo(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onrCFo(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ongBCW(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ongBCW(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ongBCW(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooe4gB(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooe4gB(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooe4gB(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
| [
"pytezos.michelson.formatter.micheline_to_michelson",
"tests.get_data"
] | [((351, 436), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json"""'}), "(path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json'\n )\n", (359, 436), False, 'from tests import get_data\n'), ((690, 768), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz"""'}), "(path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz')\n", (698, 768), False, 'from tests import get_data\n'), ((1055, 1140), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json"""'}), "(path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json'\n )\n", (1063, 1140), False, 'from tests import get_data\n'), ((1337, 1425), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')\n", (1345, 1425), False, 'from tests import get_data\n'), ((1685, 1771), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz')\n", (1693, 1771), False, 'from tests import get_data\n'), ((2059, 2147), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')\n", (2067, 2147), False, 'from tests import get_data\n'), ((2346, 2436), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')\n", (2354, 2436), False, 'from tests import get_data\n'), ((2700, 2788), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz')\n", (2708, 2788), False, 'from tests import get_data\n'), ((3080, 3170), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')\n", (3088, 3170), False, 'from tests import get_data\n'), ((3369, 3459), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')\n", (3377, 3459), False, 'from tests import get_data\n'), ((3723, 3811), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz')\n", (3731, 3811), False, 'from tests import get_data\n'), ((4103, 4193), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')\n", (4111, 4193), False, 'from tests import get_data\n'), ((4392, 4482), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')\n", (4400, 4482), False, 'from tests import get_data\n'), ((4746, 4834), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz')\n", (4754, 4834), False, 'from tests import get_data\n'), ((5126, 5216), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')\n", (5134, 5216), False, 'from tests import get_data\n'), ((5415, 5505), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')\n", (5423, 5505), False, 'from tests import get_data\n'), ((5769, 5857), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz')\n", (5777, 5857), False, 'from tests import get_data\n'), ((6149, 6239), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')\n", (6157, 6239), False, 'from tests import get_data\n'), ((6438, 6528), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')\n", (6446, 6528), False, 'from tests import get_data\n'), ((6792, 6880), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz')\n", (6800, 6880), False, 'from tests import get_data\n'), ((7172, 7262), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')\n", (7180, 7262), False, 'from tests import get_data\n'), ((7461, 7551), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')\n", (7469, 7551), False, 'from tests import get_data\n'), ((7815, 7903), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz')\n", (7823, 7903), False, 'from tests import get_data\n'), ((8195, 8285), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')\n", (8203, 8285), False, 'from tests import get_data\n'), ((8484, 8574), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')\n", (8492, 8574), False, 'from tests import get_data\n'), ((8838, 8926), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz')\n", (8846, 8926), False, 'from tests import get_data\n'), ((9218, 9308), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')\n", (9226, 9308), False, 'from tests import get_data\n'), ((485, 563), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz"""'}), "(path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz')\n", (493, 563), False, 'from tests import get_data\n'), ((822, 907), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json"""'}), "(path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json'\n )\n", (830, 907), False, 'from tests import get_data\n'), ((1189, 1221), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (1211, 1221), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((1474, 1560), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz')\n", (1482, 1560), False, 'from tests import get_data\n'), ((1820, 1908), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')\n", (1828, 1908), False, 'from tests import get_data\n'), ((2196, 2228), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (2218, 2228), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((2485, 2573), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz')\n", (2493, 2573), False, 'from tests import get_data\n'), ((2837, 2927), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')\n", (2845, 2927), False, 'from tests import get_data\n'), ((3219, 3251), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (3241, 3251), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((3508, 3596), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz')\n", (3516, 3596), False, 'from tests import get_data\n'), ((3860, 3950), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')\n", (3868, 3950), False, 'from tests import get_data\n'), ((4242, 4274), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (4264, 4274), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((4531, 4619), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz')\n", (4539, 4619), False, 'from tests import get_data\n'), ((4883, 4973), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')\n", (4891, 4973), False, 'from tests import get_data\n'), ((5265, 5297), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (5287, 5297), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((5554, 5642), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz')\n", (5562, 5642), False, 'from tests import get_data\n'), ((5906, 5996), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')\n", (5914, 5996), False, 'from tests import get_data\n'), ((6288, 6320), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (6310, 6320), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((6577, 6665), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz')\n", (6585, 6665), False, 'from tests import get_data\n'), ((6929, 7019), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')\n", (6937, 7019), False, 'from tests import get_data\n'), ((7311, 7343), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (7333, 7343), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((7600, 7688), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz')\n", (7608, 7688), False, 'from tests import get_data\n'), ((7952, 8042), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')\n", (7960, 8042), False, 'from tests import get_data\n'), ((8334, 8366), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (8356, 8366), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((8623, 8711), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz')\n", (8631, 8711), False, 'from tests import get_data\n'), ((8975, 9065), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json"""'}), "(path=\n 'contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')\n", (8983, 9065), False, 'from tests import get_data\n'), ((9357, 9389), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (9379, 9389), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n')] |
import timeit
setup = """
import phash
import imageio
import numpy as np
mod = phash.phash()
"""
read_image = """
img0 = np.array(imageio.imread('data/frog.jpeg', pilmode='F'))
mod.img_hash_f32(img0)
"""
print('data/frog.jpeg', timeit.timeit(read_image, setup=setup, number=100) * 10, "ms")
setup_imagehash = """
from PIL import Image
import imagehash
"""
hash_bench = """
imagehash.phash(Image.open('data/frog.jpeg'))
"""
print('data/frog.jpeg', timeit.timeit(hash_bench, setup=setup_imagehash, number=100) * 10, "ms")
| [
"timeit.timeit"
] | [((232, 282), 'timeit.timeit', 'timeit.timeit', (['read_image'], {'setup': 'setup', 'number': '(100)'}), '(read_image, setup=setup, number=100)\n', (245, 282), False, 'import timeit\n'), ((454, 514), 'timeit.timeit', 'timeit.timeit', (['hash_bench'], {'setup': 'setup_imagehash', 'number': '(100)'}), '(hash_bench, setup=setup_imagehash, number=100)\n', (467, 514), False, 'import timeit\n')] |
import torch
import sys
import coremltools as ct
from hidt.style_transformer import StyleTransformer
from ops import inference_size
sys.path.append('./HiDT')
class HiDT(torch.nn.Module):
def __init__(self):
super().__init__()
config_path = './configs/daytime.yaml'
gen_weights_path = './trained_models/generator/daytime.pt'
with torch.no_grad():
self.style_transformer = StyleTransformer(
config_path,
gen_weights_path,
inference_size=inference_size,
device='cpu')
def forward(self, content, style_to_transfer):
n, c, h, w = content.shape
style_to_transfer = style_to_transfer.view(1, 1, 3, 1)
style_to_transfer = style_to_transfer.repeat(n, 1, 1, 1)
encoding_fn = self.style_transformer.trainer.gen.content_encoder
content_decomposition = encoding_fn(content)
decoder_input = {'content': content_decomposition[0],
'intermediate_outputs': content_decomposition[1:],
'style': style_to_transfer}
transferred = self.style_transformer.trainer.gen.decode(decoder_input)[
'images']
return transferred.view(1, n * c, h, w)
if __name__ == '__main__':
image = torch.zeros(1, 3, 256, 452)
style_to_transfer = torch.zeros(3)
model = HiDT()
model.eval()
for param in model.parameters():
param.requires_grad = False
model.style_transformer.trainer.eval()
for param in model.style_transformer.trainer.parameters():
param.requires_grad = False
# transferred = model(image, style_to_transfer)
traced_model = torch.jit.trace(
model, (image, style_to_transfer), check_trace=False)
mlmodel = ct.convert(model=traced_model, inputs=[
ct.TensorType(name="image", shape=ct.Shape(image.shape)),
ct.TensorType(name="style", shape=ct.Shape(style_to_transfer.shape))
])
mlmodel.save("~/hidtcore.mlmodel")
| [
"torch.jit.trace",
"hidt.style_transformer.StyleTransformer",
"torch.no_grad",
"sys.path.append",
"torch.zeros",
"coremltools.Shape"
] | [((134, 159), 'sys.path.append', 'sys.path.append', (['"""./HiDT"""'], {}), "('./HiDT')\n", (149, 159), False, 'import sys\n'), ((1304, 1331), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(256)', '(452)'], {}), '(1, 3, 256, 452)\n', (1315, 1331), False, 'import torch\n'), ((1356, 1370), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (1367, 1370), False, 'import torch\n'), ((1693, 1762), 'torch.jit.trace', 'torch.jit.trace', (['model', '(image, style_to_transfer)'], {'check_trace': '(False)'}), '(model, (image, style_to_transfer), check_trace=False)\n', (1708, 1762), False, 'import torch\n'), ((369, 384), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (382, 384), False, 'import torch\n'), ((423, 520), 'hidt.style_transformer.StyleTransformer', 'StyleTransformer', (['config_path', 'gen_weights_path'], {'inference_size': 'inference_size', 'device': '"""cpu"""'}), "(config_path, gen_weights_path, inference_size=\n inference_size, device='cpu')\n", (439, 520), False, 'from hidt.style_transformer import StyleTransformer\n'), ((1868, 1889), 'coremltools.Shape', 'ct.Shape', (['image.shape'], {}), '(image.shape)\n', (1876, 1889), True, 'import coremltools as ct\n'), ((1934, 1967), 'coremltools.Shape', 'ct.Shape', (['style_to_transfer.shape'], {}), '(style_to_transfer.shape)\n', (1942, 1967), True, 'import coremltools as ct\n')] |
import json
from .helpers import send_notification_to_user
def send_user_notification(user, payload, ttl=0):
payload = json.dumps(payload)
send_notification_to_user(user, payload, ttl) | [
"json.dumps"
] | [((123, 142), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (133, 142), False, 'import json\n')] |