text
stringlengths 2
999k
|
|---|
import pandas
import re
import string
import sys
"""split-data.py: split data into different classes according to labels"""
__author__ = "YuanSun"
def main(input_file, output_path):
    """Split a labelled CSV into one text file per label value.

    Args:
        input_file: CSV with at least a 'label' column (positive ints) and a
            'message' column.
        output_path: prefix for the outputs; label i is written to
            '<prefix>i.txt', one message per line.

    URLs are replaced by 'URLS' and every digit by ' NUMBERS' before writing.
    """
    df = pandas.read_csv(input_file)
    n = max(df['label'].values)
    # One output file per label value 1..n (labels with no rows give empty files).
    for label in range(1, n + 1):
        output_file = output_path + str(label) + '.txt'
        with open(output_file, "w") as f:
            for item in df['message'][df['label'] == label]:
                # Raw strings fix the invalid '\S' escape of the original pattern.
                text = re.sub(r"http\S+", 'URLS', item)
                text = re.sub(r'[%s]' % string.digits, ' NUMBERS', text)
                f.write(text + '\n')


if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2])
|
#openbrowser.py
import pyautogui
import time
import webbrowser
#chrome = (662,1048)
#pyautogui.click(chrome)
# Open Google in the default browser, wait for it to load, then type and
# submit an initial query via simulated keystrokes.
url = 'https://www.google.com'
webbrowser.open(url)
time.sleep(3)  # crude wait for the page to finish loading — no sync available
pyautogui.write('thailand')
pyautogui.press('enter')
#######
def Search(word):
    """Focus the page's search box via keyboard, search *word*, screenshot it.

    Saves the capture as '<word>.png' in the working directory.
    """
    time.sleep(3)
    # Seven tab presses walk focus from the page top to the query field.
    for _ in range(7):
        pyautogui.press('tab')
    pyautogui.press('backspace')
    pyautogui.write(word)
    pyautogui.press('enter')
    time.sleep(3)  # give the results page time to render before capturing
    pyautogui.screenshot(word + '.png')


for query in ('singapore', 'usa', 'china'):
    Search(query)
|
# Python 3
# Be sure you have followed the instructions to download the 98-0.txt,
# the text of A Tale of Two Cities, by Charles Dickens
import collections

# Count word frequencies in A Tale of Two Cities (Project Gutenberg 98-0.txt)
# and print the ten most common words.
# The handle is closed promptly via the context manager (the original leaked it).
with open('98-0.txt', encoding="utf8") as file:
    words = file.read().lower().split()

# if you want to use stopwords, here's an example of how to do this
# stopwords = set(line.strip() for line in open('stopwords'))
wordcount = {}
for word in words:
    # Strip the punctuation characters the original handled.
    word = word.replace(".", "").replace(",", "").replace("\"", "").replace("“", "")
    # NOTE: the original guarded this with `if word not in file:` — a
    # membership test against an already-exhausted file handle that was
    # always true; it has been removed as a bug.
    wordcount[word] = wordcount.get(word, 0) + 1

# Counter gives us sorted access to the highest counts.
d = collections.Counter(wordcount)
for word, count in d.most_common(10):
    print(word, ": ", count)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common_layers import *
from .cbam import CBAM_Module
from torchvision.models.resnet import BasicBlock, Bottleneck
class CbamBlock(nn.Module):
    """ResNet basic block (two 3x3 convs) with a CBAM attention module applied
    before the residual addition."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(CbamBlock, self).__init__()
        # Attribute names/order are kept so state_dict keys stay stable.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.cbam = CBAM_Module(planes, 16)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.cbam(self.bn2(self.conv2(y)))
        y = y + shortcut
        return self.relu(y)
class CbamBottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1, 4x channel expansion) with a
    CBAM attention module applied before the residual addition."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(CbamBottleneck, self).__init__()
        # Attribute names/order are kept so state_dict keys stay stable.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.cbam = CBAM_Module(planes * 4, 16)

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.cbam(self.bn3(self.conv3(y)))
        return self.relu(y + shortcut)
class LCZResNet(nn.Module):
    """ResNet-style backbone for local-climate-zone classification.

    Spatial size is reduced only by the stem maxpool and layer4 (stride 2);
    a 32x32 input becomes an 8x8 map, which the 1x1-conv + 8x8-avgpool head
    turns into per-class scores.
    """

    def __init__(self, block, in_planes, layers, num_classes=17, first_kernel=3):
        self.inplanes = 64
        super(LCZResNet, self).__init__()
        # Stem keeps spatial size: stride 1, 'same'-style padding.  32 * 32
        self.conv1 = nn.Conv2d(in_planes, 64, kernel_size=first_kernel, stride=1, padding=first_kernel // 2,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # 16 * 16
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)   # 16 * 16
        self.layer2 = self._make_layer(block, 128, layers[1], stride=1)  # 16 * 16
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1)  # 16 * 16
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)  # 8 * 8
        # Fully-convolutional classifier head.
        self.fc = nn.Sequential(nn.Conv2d(512 * block.expansion, num_classes, 1),
                                nn.AvgPool2d(8)
                                )
        # Kaiming init for convs, constant init for batch-norm affine params.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; project the shortcut when the first
        unit changes stride or channel count."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        units.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*units)

    def forward(self, x):
        # x = x.transpose(2, 3).transpose(1, 2) # b channel s s
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        return self.fc(x).view(x.size(0), -1)
def resnet10(in_planes=20, num_classes=17, **kwargs):
    """Build a CBAM ResNet-10 (one CbamBlock per stage).

    Extra keyword arguments are forwarded to LCZResNet (e.g. first_kernel).
    """
    return LCZResNet(CbamBlock, in_planes, [1, 1, 1, 1], num_classes, **kwargs)
def resnet18(in_planes=20, num_classes=17, **kwargs):
    """Build a CBAM ResNet-18 (two CbamBlocks per stage).

    Extra keyword arguments are forwarded to LCZResNet (e.g. first_kernel).
    """
    return LCZResNet(CbamBlock, in_planes, [2, 2, 2, 2], num_classes, **kwargs)
def resnet34(in_planes=20, num_classes=17, **kwargs):
    """Build a CBAM ResNet-34 ([3, 4, 6, 3] CbamBlocks).

    Extra keyword arguments are forwarded to LCZResNet (e.g. first_kernel).
    """
    return LCZResNet(CbamBlock, in_planes, [3, 4, 6, 3], num_classes, **kwargs)
def resnet50(in_planes=20, num_classes=17, **kwargs):
    """Build a CBAM ResNet-50 ([3, 4, 6, 3] bottleneck blocks).

    Extra keyword arguments are forwarded to LCZResNet (e.g. first_kernel).
    """
    return LCZResNet(CbamBottleneck, in_planes, [3, 4, 6, 3], num_classes, **kwargs)
def resnet101(in_planes=20, num_classes=17, **kwargs):
    """Build a CBAM ResNet-101 ([3, 4, 23, 3] bottleneck blocks).

    Extra keyword arguments are forwarded to LCZResNet (e.g. first_kernel).
    """
    return LCZResNet(CbamBottleneck, in_planes, [3, 4, 23, 3], num_classes, **kwargs)
def resnet152(in_planes=20, num_classes=17, **kwargs):
    """Build a CBAM ResNet-152 ([3, 8, 36, 3] bottleneck blocks).

    Extra keyword arguments are forwarded to LCZResNet (e.g. first_kernel).
    """
    return LCZResNet(CbamBottleneck, in_planes, [3, 8, 36, 3], num_classes, **kwargs)
|
import numpy as np
from matplotlib import pyplot as plt
import open3d as o3d
from mpl_toolkits.mplot3d import Axes3D
from time import time, sleep
from quaternion import rotate, mat_from_quaternion_np, conjugate_np, multiply_np, to_magnitude_np, conjugate, multiply, to_magnitude
from helpers import randquat, slerp, quat2mat, get_command
from classes import ExplicitLoss, ImplicitLoss, IoUAccuracy, LeastSquares
import torch
from sklearn.preprocessing import normalize
import cv2
import os
def sample_points_uniform(parameters, rad_resolution=0.5):
    """Sample superellipsoid surface points on a regular angular grid.

    Args:
        parameters: sequence whose first five entries are a1, a2, a3 (axis
            scales) and e1, e2 (shape exponents); any further entries are
            ignored.
        rad_resolution: angular step in radians for both theta (longitude,
            [-pi, pi)) and gamma (latitude, [-pi/2, pi/2)).

    Returns:
        (len(theta) * len(gamma), 3) array of XYZ points, gamma varying
        fastest within each theta slice.
    """
    a1, a2, a3 = parameters[:3]
    e1, e2 = parameters[3:5]
    theta = np.arange(-np.pi, np.pi, rad_resolution)
    gamma = np.arange(-np.pi / 2, np.pi / 2, rad_resolution)
    h = len(gamma)
    pts = np.zeros(shape=(h * len(theta), 3))
    cg = np.cos(gamma)
    sg = np.sin(gamma)
    # sign(x) * |x|**e is the signed power of the superellipsoid equation.
    # ('row' replaces the original loop counter 'iter', which shadowed the builtin.)
    for row, t in enumerate(theta):
        sl = slice(row * h, (row + 1) * h)
        pts[sl, 0] = a1 * np.sign(cg * np.cos(t)) * (np.abs(cg) ** e1) * (np.abs(np.cos(t)) ** e2)
        pts[sl, 1] = a2 * np.sign(cg * np.sin(t)) * (np.abs(cg) ** e1) * (np.abs(np.sin(t)) ** e2)
        pts[sl, 2] = a3 * np.sign(sg) * (np.abs(sg) ** e1)
    return pts
def quaternion_diffs(q):
    """Return [q[0], q[1]*conj(q[0]), ..., q[N]*conj(q[N-1]), conj(q[N])] stacked.

    Each middle entry is the incremental rotation between consecutive
    quaternions in *q*.
    """
    diffs = [q[0]]
    for prev, cur in zip(q[:-1], q[1:]):
        diffs.append(multiply_np(cur, conjugate_np(prev)))
    diffs.append(conjugate_np(q[-1]))
    return np.stack(diffs)
def init_visualization():
    """Open an 800x600 Open3D window (back faces rendered) and return the visualizer."""
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window(window_name="Visu", width=800, height=600)
    visualizer.get_render_option().mesh_show_back_face = True
    return visualizer
def randsq():
    """Sample a random superquadric parameter vector of length 8:
    3 axis sizes in [0.1, 0.3), 2 exponents in [0.1, 1), 3 translations in [0.34, 0.65)."""
    sizes = np.random.uniform(0.1, 0.3, (3,))
    exponents = np.random.uniform(0.1, 1, (2,))
    translation = np.random.uniform(0.34, 0.65, (3,))
    return np.concatenate((sizes, exponents, translation))
if __name__ == "__main__":
    # Gradient-descent demo: fit a randomly initialised superquadric
    # (params_pred / q_pred) to a fixed ground-truth one by minimising
    # ExplicitLoss, visualising the predicted convex hull live with Open3D.
    device = torch.device("cuda:0")
    req_grad = False  # unused below; requires_grad is set on `pred` directly
    #fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    #out = cv2.VideoWriter('output.mp4', fourcc, 30.0, (800, 600))
    granularity = 32
    sharpness = 260  # only used by the commented-out ImplicitLoss variant
    tau = 1.5        # only used by the commented-out ImplicitLoss variant
    #loss = ImplicitLoss(render_size=64, device=device, tau=tau, sigmoid_sharpness=sharpness)
    loss = ExplicitLoss(render_size=granularity, device=device)
    acc = IoUAccuracy(render_size=128, device=device, full=True)
    torch.autograd.set_detect_anomaly(True)
    # 12-value parameter layout: [0:3] sizes, [3:5] exponents, [5:8]
    # translation, [8:12] presumably a quaternion — TODO confirm ordering.
    true_parameters = np.array([0.17840092, 0.29169756, 0.19272356, 0.564326, 0.850042, 0.5160052,0.51887995, 0.41229093, 0.468217, 0.567843, -0.355409, 0.576204])
    pred_parameters = np.concatenate([randsq(), randquat()])
    q_true = true_parameters[8:]
    q_pred = pred_parameters[8:]
    # Create SQ pointcloud
    params_true = true_parameters[:8]
    params_pred = pred_parameters[:8]
    M = quat2mat(q_true[-4:])
    # External renderer writes the ground-truth image; sizes and translation
    # are scaled by 255 — presumably the renderer uses pixel units. TODO confirm.
    params = np.concatenate((params_true[:3] * 255., params_true[3:5], params_true[5:8] * 255, M.ravel()))
    command = get_command("../", "visu_true.bmp", params)
    os.system(command)
    img = cv2.imread("visu_true.bmp", 0)/255
    true_img = torch.from_numpy(img).to(device)
    true_img = true_img.unsqueeze(0).unsqueeze(0)
    # NOTE(review): true_img is never used below — likely a leftover of the
    # ImplicitLoss code path.
    pts = sample_points_uniform(params_true, rad_resolution=0.1)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pts)
    # Create convex hull
    hull, _ = pcd.compute_convex_hull()
    hull.compute_vertex_normals()
    # Line mesh for true quadric (drawn in red)
    hull_ls1 = o3d.geometry.LineSet.create_from_triangle_mesh(hull)
    hull_ls1.paint_uniform_color((1, 0, 0))
    mat_transform_true = np.eye(4)
    mat_transform_true[:3, :3] = mat_from_quaternion_np(q_true)[0]
    mat_transform_true[:3, 3] = params_true[5:8]
    hull_ls1.transform(mat_transform_true)
    debug = False  # True disables all visualisation calls
    if not debug:
        vis = init_visualization()
        vis.add_geometry(hull_ls1)
        ctr = vis.get_view_control()
        parameters = o3d.io.read_pinhole_camera_parameters("ScreenCamera_2020-08-31-14-29-43.json")
        #ctr.convert_from_pinhole_camera_parameters(parameters)
    lr = 0.001
    i = 0
    while True:  # optimisation loop — runs until interrupted by the user
        #ctr.convert_from_pinhole_camera_parameters(parameters)
        # Rebuild the predicted hull from the current parameter estimate.
        pts = sample_points_uniform(params_pred, rad_resolution=0.1)
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(pts)
        # Create convex hull
        hull_pred, _ = pcd.compute_convex_hull()
        hull_pred.compute_vertex_normals()
        if not debug:
            vis.add_geometry(hull_pred, reset_bounding_box=False)
        mat_q = mat_from_quaternion_np(q_pred)[0]
        #hull_pred.rotate(mat_q, [0, 0, 0])
        mat_transform = np.eye(4)
        mat_transform[:3, :3] = mat_q
        mat_transform[:3, 3] = params_pred[5:8]
        hull_pred.transform(mat_transform)
        # Loss and accuracy consume the full 12-dim parameter vectors.
        true = torch.tensor(
            [
                np.concatenate([params_true, q_true])
            ], device='cuda:0')
        pred = torch.tensor(
            [
                #q_pred
                np.concatenate([params_pred, q_pred])
            ],
            device='cuda:0', requires_grad=True)
        #l = l_a + l_e + l_t + l_q
        l = loss(true, pred)
        #a = acc(true, pred[:, 8:])
        a = acc(true, pred)
        # Rotation error: magnitude of q_true * conj(q_pred).
        diff = to_magnitude(multiply(torch.from_numpy(q_true), conjugate(torch.from_numpy(q_pred))))
        loss_np = l.detach().cpu().item()
        acc_np = a.detach().cpu().item()
        l.backward()
        print("Iter", i)
        print("Predicted:", q_pred)
        print("Predicted:", normalize([q_pred]))
        print("True", q_true)
        print("Grads:", pred.grad)
        print("Loss:", loss_np)
        print("Accuracy:", acc_np)
        print("Angle difference:", diff)
        print("Lr:", lr)
        print("---------------------------------------")
        #if i > 3500:
        #    lr = 0.005
        #if i > 10000:
        #    lr = 0.001
        # Manual SGD step on the numpy copies of the parameters.
        gradient = pred.grad.detach().cpu().numpy()[0]
        #exit()
        q_pred -= lr * gradient[8:]
        q_pred = normalize([q_pred])[0]  # re-project onto the unit sphere
        params_pred -= lr * gradient[:8]
        if not debug:
            vis.update_geometry(hull_pred)
            vis.poll_events()
            vis.update_renderer()
            #if i < 100 and i % 10 == 0 or i % 100 == 0 :
            #    image = vis.capture_screen_float_buffer(False)
            #    plt.imsave("iteration_images/render_"+str(i)+".png", np.asarray(image), dpi=1)
        #img = np.asarray(vis.capture_screen_float_buffer(True))
        #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        #img = (img * 255).astype('uint8')
        #out.write(img)
        #cv2.imshow("asdasd", img)
        #cv2.waitKey()
        sleep(0.0)
        #hull_pred.rotate(mat_q.T, [0, 0, 0])
        i+=1
        if not debug:
            vis.remove_geometry(hull_pred, reset_bounding_box=False)
    # NOTE(review): unreachable — the loop above never breaks, and `out` is
    # the commented-out VideoWriter, so this would raise NameError if reached.
    out.release()
    vis.destroy_window()
"""
tensor([[ 0.1166, 0.1219, 0.1367, 0.7549, 0.5152, 0.5910, 0.3958, 0.6315,
0.4587, -0.8290, 0.2780, 0.1585],
[ 0.1996, 0.2313, 0.1593, 0.1441, 0.5284, 0.6089, 0.4262, 0.4990,
-0.0571, -0.5224, -0.7675, -0.3670],
[ 0.1361, 0.1884, 0.1332, 0.4246, 0.5976, 0.4382, 0.4804, 0.5376,
0.8509, -0.1591, 0.3079, -0.3948],
[ 0.2459, 0.2737, 0.2902, 0.9892, 0.8445, 0.6455, 0.6013, 0.3996,
-0.1203, 0.1149, -0.8852, -0.4345],
[ 0.2554, 0.1024, 0.1243, 0.4442, 0.4847, 0.3753, 0.3542, 0.5288,
0.1294, 0.7612, -0.4037, -0.4907],
[ 0.1852, 0.2629, 0.1143, 0.8005, 0.9665, 0.4829, 0.4545, 0.5807,
0.3983, -0.2366, -0.8171, -0.3431],
[ 0.2660, 0.1470, 0.2511, 0.2668, 0.2718, 0.3558, 0.3467, 0.4545,
0.1357, 0.3233, -0.8649, -0.3593],
[ 0.1758, 0.2447, 0.1448, 0.1068, 0.3768, 0.6330, 0.4025, 0.5981,
0.4034, -0.9041, -0.0815, -0.1145],
[ 0.1334, 0.1594, 0.1117, 0.7234, 0.7051, 0.4303, 0.3712, 0.5394,
-0.9855, -0.1327, -0.0569, 0.0895],
[ 0.1205, 0.2326, 0.2777, 0.5483, 0.9702, 0.3524, 0.5263, 0.4284,
0.3523, -0.1127, -0.3552, -0.8585],
[ 0.2890, 0.2399, 0.2582, 0.8803, 0.4080, 0.4028, 0.4000, 0.3670,
-0.0963, 0.3156, 0.9424, 0.0549],
[ 0.1244, 0.1707, 0.2050, 0.5093, 0.2526, 0.5665, 0.4282, 0.5605,
0.6121, -0.0320, 0.3257, -0.7199],
[ 0.2130, 0.1083, 0.1996, 0.2209, 0.5531, 0.5501, 0.5381, 0.5670,
-0.3280, 0.4695, -0.6633, 0.4818],
[ 0.1998, 0.1674, 0.2467, 0.8034, 0.2889, 0.5178, 0.6429, 0.6091,
0.0290, -0.7991, 0.5314, -0.2796],
[ 0.1538, 0.2216, 0.1758, 0.9157, 0.8596, 0.3967, 0.4878, 0.6195,
0.5293, 0.0706, -0.7950, -0.2878],
[ 0.1271, 0.1388, 0.1781, 0.6055, 0.6471, 0.4492, 0.5019, 0.5206,
0.5552, -0.4088, -0.3807, -0.6162],
[ 0.2878, 0.2136, 0.2034, 0.7296, 0.8222, 0.5515, 0.3460, 0.3959,
0.2475, 0.3209, 0.8976, 0.1736],
[ 0.1693, 0.1401, 0.2480, 0.5930, 0.9853, 0.3543, 0.4828, 0.5380,
-0.8285, -0.0754, -0.0749, -0.5498],
[ 0.1535, 0.2843, 0.2577, 0.9704, 0.9133, 0.5776, 0.4093, 0.3733,
-0.1003, -0.4884, -0.6963, -0.5164],
[ 0.1168, 0.1042, 0.2698, 0.1223, 0.2900, 0.4281, 0.6561, 0.4106,
0.2557, 0.4165, -0.8332, -0.2587],
[ 0.2087, 0.2722, 0.2937, 0.1960, 0.9346, 0.6499, 0.4697, 0.6547,
-0.4381, -0.1764, 0.8376, -0.2747],
[ 0.2370, 0.2842, 0.2350, 0.5211, 0.7314, 0.3657, 0.6316, 0.3885,
-0.2366, -0.4358, -0.3830, 0.7793],
[ 0.1221, 0.2473, 0.2742, 0.8191, 0.4745, 0.5495, 0.6175, 0.5682,
-0.5078, -0.0618, -0.8589, -0.0258],
[ 0.2055, 0.1897, 0.2784, 0.5529, 0.8743, 0.5187, 0.6402, 0.5866,
0.5061, -0.7978, 0.0086, 0.3275],
[ 0.2589, 0.2107, 0.2886, 0.2819, 0.8018, 0.5727, 0.4977, 0.5167,
0.1688, -0.4158, 0.0362, 0.8929],
[ 0.2799, 0.1774, 0.1346, 0.8796, 0.9384, 0.6305, 0.5679, 0.3860,
-0.2286, 0.8879, -0.3343, -0.2184],
[ 0.2582, 0.1026, 0.2181, 0.4935, 0.6959, 0.6124, 0.3472, 0.4668,
0.4542, 0.1331, 0.6144, -0.6312],
[ 0.2634, 0.1360, 0.2881, 0.7654, 0.2847, 0.6367, 0.5711, 0.4283,
-0.6848, -0.5008, -0.5269, -0.0502],
[ 0.1064, 0.2766, 0.1063, 0.1339, 0.9209, 0.5230, 0.5823, 0.6049,
-0.6852, -0.1264, 0.3632, 0.6186],
[ 0.2823, 0.1928, 0.1359, 0.3902, 0.7718, 0.5545, 0.3924, 0.3647,
0.2095, -0.1600, -0.8313, 0.4893],
[ 0.1356, 0.2214, 0.1431, 0.7403, 0.8372, 0.4034, 0.4375, 0.5896,
-0.7584, -0.0414, -0.6034, -0.2430],
[ 0.2570, 0.2692, 0.2181, 0.5957, 0.5709, 0.5906, 0.4201, 0.4082,
0.7313, 0.5486, -0.3786, -0.1445]], device='cuda:0')
tensor([[ 0.1630, 0.1170, 0.1343, 0.6863, 0.6626, 0.5960, 0.4090, 0.6172,
-0.4237, 0.6347, -0.3787, -0.5236],
[ 0.1917, 0.2286, 0.1711, 0.2439, 0.5095, 0.6019, 0.4181, 0.5152,
-0.7787, 0.3488, 0.0312, -0.5205],
[ 0.1293, 0.1235, 0.1828, 0.4384, 0.3096, 0.4346, 0.4808, 0.5618,
-0.8571, 0.3437, -0.1400, -0.3572],
[ 0.2592, 0.2608, 0.2323, 0.8135, 0.8656, 0.6431, 0.6031, 0.4286,
-0.8127, 0.4474, -0.3436, -0.1458],
[ 0.1531, 0.2296, 0.1248, 0.4013, 0.1570, 0.3599, 0.4024, 0.5185,
-0.4208, 0.5940, 0.0135, -0.6855],
[ 0.1987, 0.2375, 0.1179, 0.5858, 0.6243, 0.4729, 0.4545, 0.5758,
-0.7421, 0.4574, -0.4521, -0.1888],
[ 0.2624, 0.2540, 0.1852, 0.2622, 0.2963, 0.3386, 0.3398, 0.4476,
-0.8438, 0.3232, 0.1379, -0.4056],
[ 0.2566, 0.2324, 0.1855, 0.2029, 0.1093, 0.6408, 0.4319, 0.4983,
-0.3603, 0.7097, -0.2401, -0.5557],
[ 0.1639, 0.1353, 0.1494, 0.4947, 0.5564, 0.4326, 0.3827, 0.4977,
-0.5882, 0.4781, -0.5506, -0.3498],
[ 0.1519, 0.2057, 0.2480, 0.3223, 0.5185, 0.3922, 0.5507, 0.4446,
-0.4215, 0.8022, -0.4022, -0.1307],
[ 0.2337, 0.2768, 0.2423, 0.7401, 0.4105, 0.4054, 0.4052, 0.3858,
-0.6201, 0.6997, -0.3093, -0.1740],
[ 0.1210, 0.1677, 0.1872, 0.3454, 0.2310, 0.5654, 0.4316, 0.5811,
-0.7153, 0.3179, 0.0071, -0.6223],
[ 0.2184, 0.1870, 0.2020, 0.2726, 0.2964, 0.5651, 0.5283, 0.5028,
-0.6453, 0.2344, -0.0224, -0.7267],
[ 0.1901, 0.2181, 0.2076, 0.4778, 0.4167, 0.5146, 0.6539, 0.6032,
-0.3854, 0.7534, -0.1189, -0.5194],
[ 0.1909, 0.1615, 0.2119, 0.6875, 0.6855, 0.3861, 0.5104, 0.5994,
-0.5495, 0.6275, -0.2012, -0.5137],
[ 0.1729, 0.1322, 0.1732, 0.4193, 0.5756, 0.4634, 0.5046, 0.4908,
-0.4382, 0.5721, -0.5471, -0.4259],
[ 0.2239, 0.2710, 0.2052, 0.7652, 0.6803, 0.5463, 0.3632, 0.4036,
-0.5316, 0.7329, -0.0942, -0.4139],
[ 0.2065, 0.1659, 0.2329, 0.3605, 0.6712, 0.3505, 0.5146, 0.4863,
-0.6230, 0.5484, -0.4425, -0.3395],
[ 0.1456, 0.2459, 0.2273, 0.7667, 0.7134, 0.5705, 0.4229, 0.4159,
-0.8172, 0.4838, -0.0928, -0.2992],
[ 0.1174, 0.2648, 0.1561, 0.2703, 0.1541, 0.4152, 0.6272, 0.3920,
-0.8950, 0.3129, -0.0259, -0.3168],
[ 0.2603, 0.2921, 0.2661, 0.4779, 0.2944, 0.6103, 0.4599, 0.6307,
-0.8729, 0.3918, 0.0788, -0.2799],
[ 0.2762, 0.2781, 0.2288, 0.4289, 0.6837, 0.3454, 0.6506, 0.3698,
-0.7695, 0.3812, -0.4475, -0.2495],
[ 0.2332, 0.2448, 0.2165, 0.4134, 0.4686, 0.5900, 0.6019, 0.5035,
-0.7365, 0.6298, -0.2319, 0.0842],
[ 0.1957, 0.1801, 0.2783, 0.5315, 0.6489, 0.5190, 0.6397, 0.6006,
-0.5024, 0.7965, -0.0535, -0.3320],
[ 0.2499, 0.2885, 0.2022, 0.5052, 0.3483, 0.5616, 0.5026, 0.5365,
-0.7641, 0.2749, -0.2994, -0.5010],
[ 0.1688, 0.2537, 0.1379, 0.7182, 0.6605, 0.6098, 0.5630, 0.3986,
-0.7925, 0.4795, -0.1291, -0.3540],
[ 0.2642, 0.1096, 0.2140, 0.5459, 0.6507, 0.6209, 0.3557, 0.4677,
-0.6275, 0.6159, -0.1721, -0.4442],
[ 0.1646, 0.2522, 0.2490, 0.2561, 0.5137, 0.6093, 0.5871, 0.4333,
-0.8367, 0.3655, -0.2208, -0.3430],
[ 0.2428, 0.1152, 0.1397, 0.4883, 0.4749, 0.5474, 0.5982, 0.6099,
-0.4195, 0.4782, -0.4393, -0.6343],
[ 0.1848, 0.1716, 0.2682, 0.4392, 0.3136, 0.5784, 0.4008, 0.3472,
-0.6743, 0.3131, -0.0589, -0.6662],
[ 0.2163, 0.1408, 0.2062, 0.6765, 0.5591, 0.3840, 0.4244, 0.5450,
-0.4820, 0.5742, -0.3126, -0.5833],
[ 0.2500, 0.2579, 0.2323, 0.6857, 0.4722, 0.5751, 0.4233, 0.4227,
-0.5385, 0.7467, 0.1290, -0.3685]], device='cuda:0',
grad_fn=<CatBackward>)
"""
|
import os
import logging
from flask import Flask, render_template, Response, send_from_directory, request, current_app
# Module-wide Flask application and logger shared by all routes below.
flask_app = Flask(__name__)
logging.basicConfig()
log = logging.getLogger(__name__)
@flask_app.route("/")
def main():
    """Render the top-level Inventory dashboard."""
    page = render_template('main.html', title='Inventory')
    return page
@flask_app.route("/small")
def small():
    """Render the compact dashboard layout."""
    page = render_template('small.html', title='pyDashie')
    return page
@flask_app.route("/dashboard/<dashlayout>/")
def custom_layout(dashlayout):
    """Render an arbitrary named dashboard layout template."""
    template = '%s.html' % dashlayout
    return render_template(template, title='pyDashie')
@flask_app.route("/assets/application.js")
def javascripts():
    """Serve the concatenated dashboard JavaScript bundle.

    On first request, plain .js files are read and .coffee files compiled,
    then the result is cached on ``current_app.javascripts`` so later
    requests skip the work entirely.
    """
    if not hasattr(current_app, 'javascripts'):
        import coffeescript
        scripts = [
            'assets/javascripts/jquery.js',
            'assets/javascripts/es5-shim.js',
            'assets/javascripts/d3.v2.min.js',
            'assets/javascripts/batman.js',
            'assets/javascripts/batman.jquery.js',
            'assets/javascripts/jquery.gridster.js',
            'assets/javascripts/jquery.leanModal.min.js',
            #'assets/javascripts/dashing.coffee',
            'assets/javascripts/dashing.gridster.coffee',
            'assets/javascripts/jquery.knob.js',
            'assets/javascripts/rickshaw.min.js',
            'assets/javascripts/dashing-chartjs.coffee',
            #'assets/javascripts/application.coffee',
            'assets/javascripts/app.js',
            #'widgets/clock/clock.coffee',
            'widgets/number/number.coffee',
            'widgets/linechart/linechart.coffee',
        ]
        nizzle = True
        if not nizzle:
            scripts = ['assets/javascripts/application.js']
        output = []
        for path in scripts:
            output.append('// JS: %s\n' % path)
            if '.coffee' in path:
                log.info('Compiling Coffee for %s ' % path)
                contents = coffeescript.compile_file(path)
            else:
                # `with` fixes the original's leaked file handle.
                with open(path) as f:
                    contents = f.read()
            output.append(contents)
        if nizzle:
            # Debug path: round-trip the bundle through /tmp/foo.js.
            with open('/tmp/foo.js', 'w') as f:
                for o in output:
                    print(o, end="", file=f)
            with open('/tmp/foo.js', 'rb') as f:
                output = f.read()
            current_app.javascripts = output
        else:
            current_app.javascripts = ''.join(output)
    return Response(current_app.javascripts, mimetype='application/javascript')
@flask_app.route('/assets/application.css')
def application_css():
    """Serve the concatenated stylesheet bundle."""
    scripts = [
        'assets/stylesheets/application.css',
    ]
    parts = []
    for path in scripts:
        # `with` fixes the original's leaked file handle; join avoids
        # quadratic string concatenation.
        with open(path) as f:
            parts.append(f.read())
    return Response(''.join(parts), mimetype='text/css')
@flask_app.route('/assets/images/<path:filename>')
def send_static_img(filename):
    """Serve a static image from the assets/images directory."""
    return send_from_directory(os.path.join('assets', 'images'), filename)
@flask_app.route('/views/<widget_name>.html')
def widget_html(widget_name):
    """Serve the HTML fragment for a widget from widgets/<name>/<name>.html.

    Returns the file contents, or falls through to None when the template is
    missing (preserving the original behaviour).
    """
    html = '%s.html' % widget_name
    path = os.path.join('widgets', widget_name, html)
    if os.path.isfile(path):
        # `with` fixes the original's leaked file handle.
        with open(path) as f:
            return f.read()
import queue
class EventsManager:
    """Shared state for the server-sent-events streams."""
    def __init__(self):
        # Maps client port -> queue.Queue of pending event strings.
        self.events_queue = {}
        # Last payload per event name, replayed to newly connected clients.
        self.last_events = {}
        self.using_events = True
        self.MAX_QUEUE_LENGTH = 20
        # Set to True to make all pop_queue generators exit.
        self.stopped = False
# Single module-wide instance shared by the routes and helpers below.
events_manager = EventsManager()
@flask_app.route('/events')
def events():
    """SSE endpoint: register a per-client queue and stream events from it."""
    if events_manager.using_events:
        # The client's ephemeral port identifies its stream/queue.
        event_stream_port = request.environ['REMOTE_PORT']
        current_event_queue = queue.Queue()
        events_manager.events_queue[event_stream_port] = current_event_queue
        current_app.logger.info('New Client %s connected. Total Clients: %s' %
        (event_stream_port, len(events_manager.events_queue)))
        #Start the newly connected client off by pushing the current last events
        for event in events_manager.last_events.values():
            current_event_queue.put(event)
        return Response(pop_queue(current_event_queue), mimetype='text/event-stream')
    # Fallback when streaming is disabled: dump the last known events once.
    return Response(events_manager.last_events.values(), mimetype='text/event-stream')
def pop_queue(current_event_queue):
    """Generator yielding events for one connected SSE client until shutdown."""
    while not events_manager.stopped:
        try:
            yield current_event_queue.get(timeout=0.1)
        except queue.Empty:
            # The short timeout lets the loop re-check the stop flag, so server
            # threads exit cleanly instead of blocking forever on get().
            continue
def purge_streams():
    """Disconnect clients whose event queues have grown beyond the limit.

    A queue longer than MAX_QUEUE_LENGTH means the client has stopped
    consuming; its queue is dropped so the producer stops filling it.
    """
    # Renamed the comprehension variable: the original shadowed the stdlib
    # `queue` module.
    big_queues = [port for port, q in events_manager.events_queue.items()
                  if q.qsize() > events_manager.MAX_QUEUE_LENGTH]
    for big_queue in big_queues:
        current_app.logger.info('Client %s is stale. Disconnecting. Total Clients: %s' %
                                (big_queue, len(events_manager.events_queue)))
        # Fixed two bugs: the original logged `events_queue.qsize()` (dicts
        # have no qsize) and executed `del queue[big_queue]`, which targeted
        # the stdlib module instead of the events dict.
        del events_manager.events_queue[big_queue]
def close_stream(*args, **kwargs):
    """socketserver handle_error hook: drop the queue of a disconnected client.

    args[2] is the client address tuple; its second element is the port that
    keys the client's queue.
    """
    port = args[2][1]
    del events_manager.events_queue[port]
    log.info('Client %s disconnected. Total Clients: %s' % (port, len(events_manager.events_queue)))
def run_sample_app():
    """Install the disconnect hook and hand control to the app module's runner."""
    import socketserver
    # Overriding handle_error lets client disconnects clean up their queues.
    socketserver.BaseServer.handle_error = close_stream
    import app
    app.run(flask_app, events_manager)
if __name__ == "__main__":
    run_sample_app()
|
"""
Collection of Numpy linear algebra functions, wrapped to fit Ivy syntax and signature.
"""
# global
import numpy as _np
import ivy as _ivy
from typing import Union, Tuple
from collections import namedtuple
# Direct re-export: NumPy's SVD already matches the expected Ivy signature.
svd = _np.linalg.svd
def matrix_norm(x, p=2, axes=None, keepdims=False):
axes = (-2, -1) if axes is None else axes
if isinstance(axes, int):
raise Exception('if specified, axes must be a length-2 sequence of ints,'
'but found {} of type {}'.format(axes, type(axes)))
elif isinstance(axes, list):
axes = tuple(axes)
ret = _np.array(_np.linalg.norm(x, p, axes, keepdims))
if ret.shape == ():
return _np.expand_dims(ret, 0)
return ret
# Direct re-exports of NumPy's implementations; signatures already match.
inv = _np.linalg.inv
pinv = _np.linalg.pinv
cholesky = _np.linalg.cholesky
def vector_to_skew_symmetric_matrix(vector):
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = _np.expand_dims(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = _np.zeros(batch_shape + [1, 1])
# BS x 1 x 3
row1 = _np.concatenate((zs, -a3s, a2s), -1)
row2 = _np.concatenate((a3s, zs, -a1s), -1)
row3 = _np.concatenate((-a2s, a1s, zs), -1)
# BS x 3 x 3
return _np.concatenate((row1, row2, row3), -2)
def slogdet(x:Union[_ivy.Array,_ivy.NativeArray],full_matrices: bool = True) -> Union[_ivy.Array, Tuple[_ivy.Array,...]]:
results = namedtuple("slogdet", "sign logabsdet")
sign, logabsdet = _np.linalg.slogdet(x)
res = results(sign, logabsdet)
return res
def det(x:Union[_ivy.Array,_ivy.NativeArray],full_matrices: bool = True) -> Union[_ivy.Array, Tuple[_ivy.Array,...]]:
d = _np.linalg.det(x)
return d
|
"""We want to share these two methods between the ShopItemsView class and also the Patch methods,
so they are implemented in a separate helper file here."""
from sqlalchemy import and_
from app import db
from apps.shop.models import ShopCategories, ShopItemsCategoriesMapping, ShopItemsURLMapping
def add_categories(item_id, categories):
    """Attach categories to the given shop item.

    Each entry of *categories* is either:
      * an int  -> an existing ShopCategoryID; unknown IDs are skipped, or
      * a dict like {"category": "abc", "subcategory": "xyz"} -> looked up by
        name and created first when it does not exist yet.

    A ShopItemsCategoriesMapping row is committed for every accepted category.
    """
    for cat in categories:
        if isinstance(cat, int):  # fixed: `type(cat) is int` -> isinstance
            # Possibly existing category
            existing = ShopCategories.query.filter_by(ShopCategoryID=cat).first()
            if existing is None:
                # Non-existing ID: skip silently (the original printed a
                # blank line here — debug residue, removed).
                continue
            valid_category_id = existing.ShopCategoryID
        else:
            # New category, but first check the name pair does not exist yet.
            existing = ShopCategories.query.filter(
                and_(
                    ShopCategories.Category == cat["category"],
                    ShopCategories.SubCategory == cat["subcategory"]
                )
            ).first()
            if existing is not None:
                valid_category_id = existing.ShopCategoryID
            else:
                new_cat = ShopCategories(
                    Category=cat["category"],
                    SubCategory=cat["subcategory"]
                )
                db.session.add(new_cat)
                # Commit first so the autogenerated primary key is populated.
                db.session.commit()
                valid_category_id = new_cat.ShopCategoryID
        # Add the item <-> category mapping.
        mapping = ShopItemsCategoriesMapping(
            ShopItemID=item_id,
            ShopCategoryID=valid_category_id
        )
        db.session.add(mapping)
        db.session.commit()
def add_urls(item_id, urls):
    """Insert URL rows for the given shop item.

    No validation is performed; each dict in *urls* is stored as-is, one
    commit per row (matching the original behaviour).
    """
    for url in urls:
        row = ShopItemsURLMapping(
            ShopItemID=item_id,
            URLTitle=url["title"],
            URL=url["url"],
            ShopItemLogoID=url["logoID"],
        )
        db.session.add(row)
        db.session.commit()
|
import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from decimal import Decimal
from functools import partial, lru_cache
from typing import (NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any,
Sequence, Iterable)
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem, QImage,
QPalette, QIcon, QFontMetrics, QShowEvent, QPainter, QHelpEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale, QAbstractItemModel,
QEvent, QRect, QPoint, QObject)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu, QStyleOptionViewItem, QLayout, QLayoutItem,
QGraphicsEffect, QGraphicsScene, QGraphicsPixmapItem)
from electrum_plcu.i18n import _, languages
from electrum_plcu.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum_plcu.invoices import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING, PR_UNCONFIRMED
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .installwizard import InstallWizard
from electrum_plcu.simple_config import SimpleConfig
# Platform-appropriate monospace font for amounts, addresses and tx details.
if platform.system() == 'Windows':
    MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
    MONOSPACE_FONT = 'Monaco'
else:
    MONOSPACE_FONT = 'monospace'
# Keeps non-modal dialogs referenced so they are not garbage-collected early.
dialogs = []
# Status-icon filename for each payment-request state.
pr_icons = {
    PR_UNKNOWN:"warning.png",
    PR_UNPAID:"unpaid.png",
    PR_PAID:"confirmed.png",
    PR_EXPIRED:"expired.png",
    PR_INFLIGHT:"unconfirmed.png",
    PR_FAILED:"warning.png",
    PR_ROUTING:"unconfirmed.png",
    PR_UNCONFIRMED:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
                                              f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
                                              f"All files (*)")
class EnterButton(QPushButton):
    """Push button that also fires its callback when Return/Enter is pressed."""

    def __init__(self, text, func):
        super().__init__(text)
        self.func = func
        self.clicked.connect(func)

    def keyPressEvent(self, e):
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
class ThreadedButton(QPushButton):
    """Button that runs `task` on a TaskThread when clicked, disabling itself
    until the task completes."""
    def __init__(self, text, task, on_success=None, on_error=None):
        QPushButton.__init__(self, text)
        self.task = task                # callable executed off the GUI thread
        self.on_success = on_success    # optional success callback
        self.on_error = on_error        # optional error callback
        self.clicked.connect(self.run_task)
    def run_task(self):
        # Disable to prevent re-entrant clicks while the task is running.
        self.setEnabled(False)
        self.thread = TaskThread(self)
        self.thread.add(self.task, self.on_success, self.done, self.on_error)
    def done(self):
        # Re-enable the button and tear down the worker thread.
        self.setEnabled(True)
        self.thread.stop()
class WWLabel(QLabel):
    """Word-wrapping label whose text can be selected with the mouse."""

    def __init__(self, text="", parent=None):
        super().__init__(text, parent)
        self.setWordWrap(True)
        self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
    """Label that underlines on hover and opens a help box when clicked."""

    def __init__(self, text, help_text):
        super().__init__(text)
        self.help_text = help_text
        self.app = QCoreApplication.instance()
        self.font = QFont()

    def mouseReleaseEvent(self, event):
        custom_message_box(icon=QMessageBox.Information,
                           parent=self,
                           title=_('Help'),
                           text=self.help_text)

    def enterEvent(self, event):
        # Underline plus hand cursor to signal clickability.
        self.font.setUnderline(True)
        self.setFont(self.font)
        self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        return QLabel.enterEvent(self, event)

    def leaveEvent(self, event):
        # Restore the normal appearance on mouse-out.
        self.font.setUnderline(False)
        self.setFont(self.font)
        self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        return QLabel.leaveEvent(self, event)
class HelpButton(QToolButton):
    """Small '?' tool button that pops a rich-text help message box."""

    def __init__(self, text):
        super().__init__()
        self.setText('?')
        self.help_text = text
        self.setFocusPolicy(Qt.NoFocus)
        self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
        self.clicked.connect(self.onclick)

    def onclick(self):
        custom_message_box(icon=QMessageBox.Information,
                           parent=self,
                           title=_('Help'),
                           text=self.help_text,
                           rich_text=True)
class InfoButton(QPushButton):
    """'Info' push button that shows a rich-text info popup on click."""

    def __init__(self, text):
        super().__init__('Info')
        self.help_text = text
        self.setFocusPolicy(Qt.NoFocus)
        self.setFixedWidth(6 * char_width_in_lineedit())
        self.clicked.connect(self.onclick)

    def onclick(self):
        custom_message_box(
            icon=QMessageBox.Information,
            parent=self,
            title=_('Info'),
            text=self.help_text,
            rich_text=True,
        )
class Buttons(QHBoxLayout):
    """Right-aligned horizontal row of buttons; None entries are skipped."""

    def __init__(self, *buttons):
        super().__init__()
        self.addStretch(1)
        for button in (b for b in buttons if b is not None):
            self.addWidget(button)
class CloseButton(QPushButton):
    """Default-styled button that closes the given dialog."""

    def __init__(self, dialog):
        super().__init__(_("Close"))
        self.clicked.connect(dialog.close)
        self.setDefault(True)
class CopyButton(QPushButton):
    """Button that copies text_getter() to the application clipboard."""

    def __init__(self, text_getter, app):
        super().__init__(_("Copy"))

        def do_copy():
            app.clipboard().setText(text_getter())

        self.clicked.connect(do_copy)
class CopyCloseButton(QPushButton):
    """Button that copies text_getter() to the clipboard, then closes the
    dialog."""

    def __init__(self, text_getter, app, dialog):
        super().__init__(_("Copy and Close"))

        def do_copy():
            app.clipboard().setText(text_getter())

        self.clicked.connect(do_copy)
        self.clicked.connect(dialog.close)
        self.setDefault(True)
class OkButton(QPushButton):
    """Default button that accepts the given dialog."""

    def __init__(self, dialog, label=None):
        super().__init__(label or _("OK"))
        self.clicked.connect(dialog.accept)
        self.setDefault(True)
class CancelButton(QPushButton):
    """Button that rejects the given dialog."""

    def __init__(self, dialog, label=None):
        super().__init__(label or _("Cancel"))
        self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
    """Mixin providing the standard message-box helpers (question, warning,
    error, critical, info).  Popups are parented to the deepest visible
    modal dialog so they stack correctly in the multi-window setup."""

    def top_level_window_recurse(self, window=None, test_func=None):
        """Descend into visible child dialogs and return the deepest one
        accepted by test_func (default: accept all)."""
        window = window or self
        classes = (WindowModalDialog, QMessageBox)
        if test_func is None:
            test_func = lambda x: True
        for n, child in enumerate(window.children()):
            # Test for visibility as old closed dialogs may not be GC-ed.
            # Only accept children that conform to test_func.
            if isinstance(child, classes) and child.isVisible() \
                    and test_func(child):
                return self.top_level_window_recurse(child, test_func=test_func)
        return window

    def top_level_window(self, test_func=None):
        # fix: pass test_func by keyword -- previously it was passed
        # positionally and landed in the `window` parameter slot, breaking
        # any call that actually supplied a test_func
        return self.top_level_window_recurse(test_func=test_func)

    def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
        """Ask a Yes/No question; returns True iff the user chose Yes."""
        Yes, No = QMessageBox.Yes, QMessageBox.No
        return Yes == self.msg_box(icon=icon or QMessageBox.Question,
                                   parent=parent,
                                   title=title or '',
                                   text=msg,
                                   buttons=Yes|No,
                                   defaultButton=No,
                                   **kwargs)

    def show_warning(self, msg, parent=None, title=None, **kwargs):
        return self.msg_box(QMessageBox.Warning, parent,
                            title or _('Warning'), msg, **kwargs)

    def show_error(self, msg, parent=None, title=None, **kwargs):
        # `title` parameter added for consistency with the other show_*
        # helpers; backward-compatible (defaults to the old fixed title)
        return self.msg_box(QMessageBox.Warning, parent,
                            title or _('Error'), msg, **kwargs)

    def show_critical(self, msg, parent=None, title=None, **kwargs):
        return self.msg_box(QMessageBox.Critical, parent,
                            title or _('Critical Error'), msg, **kwargs)

    def show_message(self, msg, parent=None, title=None, **kwargs):
        return self.msg_box(QMessageBox.Information, parent,
                            title or _('Information'), msg, **kwargs)

    def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
                defaultButton=QMessageBox.NoButton, rich_text=False,
                checkbox=None):
        """Show a message box parented to the deepest visible dialog and
        return its exec result."""
        parent = parent or self.top_level_window()
        return custom_message_box(icon=icon,
                                  parent=parent,
                                  title=title,
                                  text=text,
                                  buttons=buttons,
                                  defaultButton=defaultButton,
                                  rich_text=rich_text,
                                  checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
                       defaultButton=QMessageBox.NoButton, rich_text=False,
                       checkbox=None):
    """Show a window-modal QMessageBox and return its exec_() result.

    `icon` may be a standard QMessageBox icon constant or a QPixmap.
    With rich_text=True, links are clickable and Qt auto-detects whether
    the text is rich text or plain.
    """
    if type(icon) is QPixmap:
        d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
        d.setIconPixmap(icon)
    else:
        d = QMessageBox(icon, title, str(text), buttons, parent)
    d.setWindowModality(Qt.WindowModal)
    d.setDefaultButton(defaultButton)
    if rich_text:
        d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
        # set AutoText instead of RichText
        # AutoText lets Qt figure out whether to render as rich text.
        # e.g. if text is actually plain text and uses "\n" newlines;
        # and we set RichText here, newlines would be swallowed
        d.setTextFormat(Qt.AutoText)
    else:
        d.setTextInteractionFlags(Qt.TextSelectableByMouse)
        d.setTextFormat(Qt.PlainText)
    if checkbox is not None:
        d.setCheckBox(checkbox)
    return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
    '''Handy wrapper; window modal dialogs are better for our multi-window
    daemon model as other wallet windows can still be accessed.'''
    def __init__(self, parent, title=None):
        QDialog.__init__(self, parent)
        # window-modal: blocks only this window's chain, not the whole app
        self.setWindowModality(Qt.WindowModal)
        if title:
            self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
    '''Shows a please wait dialog whilst running a task. It is not
    necessary to maintain a reference to this dialog.

    The task runs on a background TaskThread; on_success/on_error run in
    the GUI thread and the dialog closes itself when the task finishes.
    '''
    def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
        assert parent
        if isinstance(parent, MessageBoxMixin):
            parent = parent.top_level_window()
        WindowModalDialog.__init__(self, parent, _("Please wait"))
        self.message_label = QLabel(message)
        vbox = QVBoxLayout(self)
        vbox.addWidget(self.message_label)
        self.accepted.connect(self.on_accepted)
        self.show()
        self.thread = TaskThread(self)
        self.thread.finished.connect(self.deleteLater)  # see #3956
        self.thread.add(task, on_success, self.accept, on_error)

    def wait(self):
        """Block until the worker thread has finished."""
        self.thread.wait()

    def on_accepted(self):
        self.thread.stop()

    def update(self, msg):
        """Update the message shown to the user."""
        # fix: removed stray debug print(msg) -- the message belongs in the
        # label only, not on stdout
        self.message_label.setText(msg)
class BlockingWaitingDialog(WindowModalDialog):
    """Shows a waiting dialog whilst running a task.
    Should be called from the GUI thread. The GUI thread will be blocked while
    the task is running; the point of the dialog is to provide feedback
    to the user regarding what is going on.
    """
    def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
        assert parent
        if isinstance(parent, MessageBoxMixin):
            parent = parent.top_level_window()
        WindowModalDialog.__init__(self, parent, _("Please wait"))
        self.message_label = QLabel(message)
        vbox = QVBoxLayout(self)
        vbox.addWidget(self.message_label)
        # show popup
        self.show()
        # refresh GUI; needed for popup to appear and for message_label to get drawn
        # (two passes -- presumably one for the show and one for the paint;
        # confirm before reducing)
        QCoreApplication.processEvents()
        QCoreApplication.processEvents()
        # block and run given task
        task()
        # close popup
        self.accept()
def line_dialog(parent, title, label, ok_label, default=None):
    """Prompt the user for a single line of text.

    Returns the entered text, or None if the dialog was cancelled.
    """
    dialog = WindowModalDialog(parent, title)
    dialog.setMinimumWidth(500)
    l = QVBoxLayout()
    dialog.setLayout(l)
    l.addWidget(QLabel(label))
    txt = QLineEdit()
    if default:
        txt.setText(default)
    l.addWidget(txt)
    l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
    if dialog.exec_():
        return txt.text()
def text_dialog(
        *,
        parent,
        title,
        header_layout,
        ok_label,
        default=None,
        allow_multi=False,
        transport_pin=False,
        config: 'SimpleConfig'
):
    """Prompt for multi-line text via a ScanQRTextEdit.

    `header_layout` may be a plain string (shown as a label) or a QLayout.
    With transport_pin=True an extra PIN field is shown, OK is enabled only
    once the PIN has at least 4 characters, and the PIN is appended to the
    returned text after a newline.  Returns None if cancelled.
    """
    from .qrtextedit import ScanQRTextEdit
    dialog = WindowModalDialog(parent, title)
    dialog.setMinimumWidth(600)
    l = QVBoxLayout()
    dialog.setLayout(l)
    if isinstance(header_layout, str):
        l.addWidget(QLabel(header_layout))
    else:
        l.addLayout(header_layout)
    txt = ScanQRTextEdit(allow_multi=allow_multi, config=config)
    if default:
        txt.setText(default)
    l.addWidget(txt)
    ok_but = OkButton(dialog, ok_label)
    if transport_pin:
        ok_but.setEnabled(False)
        tp_layout = QHBoxLayout()
        e1 = QLineEdit()
        tp_layout.addWidget(QLabel("Transport PIN:"))
        tp_layout.addWidget(e1)
        tp_layout.addStretch(1)
        l.addLayout(tp_layout)
        def check_transport_pin_correct():
            # minimal sanity check on PIN length; enables the OK button
            ok_but.setEnabled(len(e1.text()) >= 4)
        e1.textChanged.connect(check_transport_pin_correct)
    l.addLayout(Buttons(CancelButton(dialog), ok_but))
    if dialog.exec_():
        text = txt.toPlainText()
        if transport_pin:
            text += '\n'
            text += e1.text()
        return text
class ChoicesLayout(object):
    """Vertical layout presenting a message plus mutually-exclusive radio
    buttons; query the current selection via selected_index()."""
    def __init__(self, msg, choices, on_clicked=None, checked_index=0):
        vbox = QVBoxLayout()
        if len(msg) > 50:
            # long messages go above the group box as a word-wrapped label
            vbox.addWidget(WWLabel(msg))
            msg = ""
        gb2 = QGroupBox(msg)
        vbox.addWidget(gb2)
        vbox2 = QVBoxLayout()
        gb2.setLayout(vbox2)
        self.group = group = QButtonGroup()
        for i,c in enumerate(choices):
            button = QRadioButton(gb2)
            button.setText(c)
            vbox2.addWidget(button)
            group.addButton(button)
            group.setId(button, i)
            if i==checked_index:
                button.setChecked(True)
        if on_clicked:
            # on_clicked receives this ChoicesLayout as its first argument
            group.buttonClicked.connect(partial(on_clicked, self))
        self.vbox = vbox
    def layout(self):
        return self.vbox
    def selected_index(self):
        return self.group.checkedId()
def address_field(addresses):
    """Build an address line edit plus an 'Address' button that cycles
    through *addresses*.  Returns (hbox, line_edit)."""
    hbox = QHBoxLayout()
    address_e = QLineEdit()
    if addresses and len(addresses) > 0:
        address_e.setText(addresses[0])
    else:
        addresses = []
    def func():
        try:
            # advance to the next address after the one currently shown
            i = addresses.index(str(address_e.text())) + 1
            i = i % len(addresses)
            address_e.setText(addresses[i])
        except ValueError:
            # the user might have changed address_e to an
            # address not in the wallet (or to something that isn't an address)
            if addresses and len(addresses) > 0:
                address_e.setText(addresses[0])
    button = QPushButton(_('Address'))
    button.clicked.connect(func)
    hbox.addWidget(button)
    hbox.addWidget(address_e)
    return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
    """Build a save-target selector: CSV/JSON radio buttons plus a filename
    line edit with a file-picker button.

    Returns (vbox, filename_e, b1) where b1 is the CSV radio button
    (checked by default).
    """
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Format")))
    gb = QGroupBox("format", parent)
    b1 = QRadioButton(gb)
    b1.setText(_("CSV"))
    b1.setChecked(True)
    b2 = QRadioButton(gb)
    b2.setText(_("json"))
    vbox.addWidget(b1)
    vbox.addWidget(b2)
    hbox = QHBoxLayout()
    directory = config.get('io_dir', os.path.expanduser('~'))
    path = os.path.join(directory, defaultname)
    filename_e = QLineEdit()
    filename_e.setText(path)
    def func():
        # open a save dialog pre-filled with the current text
        text = filename_e.text()
        _filter = "*.csv" if defaultname.endswith(".csv") else "*.json" if defaultname.endswith(".json") else None
        p = getSaveFileName(
            parent=None,
            title=select_msg,
            filename=text,
            filter=_filter,
            config=config,
        )
        if p:
            filename_e.setText(p)
    button = QPushButton(_('File'))
    button.clicked.connect(func)
    hbox.addWidget(button)
    hbox.addWidget(filename_e)
    vbox.addLayout(hbox)
    def set_csv(v):
        # keep the filename extension in sync with the chosen format
        text = filename_e.text()
        text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
        filename_e.setText(text)
    b1.clicked.connect(lambda: set_csv(True))
    b2.clicked.connect(lambda: set_csv(False))
    return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
    """Item delegate for MyTreeView: routes editor commit/close events back
    to the view and supports custom cell painting/tooltips/sizing via
    MyTreeView.ROLE_CUSTOM_PAINT item data."""
    def __init__(self, tv: 'MyTreeView'):
        super().__init__(tv)
        self.tv = tv
        # QPersistentModelIndex of the cell currently being edited, or None
        self.opened = None
        def on_closeEditor(editor: QLineEdit, hint):
            self.opened = None
            self.tv.is_editor_open = False
            if self.tv._pending_update:
                self.tv.update()
        def on_commitData(editor: QLineEdit):
            new_text = editor.text()
            idx = QModelIndex(self.opened)
            row, col = idx.row(), idx.column()
            edit_key = self.tv.get_edit_key_from_coordinate(row, col)
            assert edit_key is not None, (idx.row(), idx.column())
            self.tv.on_edited(idx, edit_key=edit_key, text=new_text)
        self.closeEditor.connect(on_closeEditor)
        self.commitData.connect(on_commitData)
    def createEditor(self, parent, option, idx):
        # remember which cell is open so commit/close handlers can find it
        self.opened = QPersistentModelIndex(idx)
        self.tv.is_editor_open = True
        return super().createEditor(parent, option, idx)
    def paint(self, painter: QPainter, option: QStyleOptionViewItem, idx: QModelIndex) -> None:
        custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
        if custom_data is None:
            return super().paint(painter, option, idx)
        else:
            # let's call the default paint method first; to paint the background (e.g. selection)
            super().paint(painter, option, idx)
            # and now paint on top of that
            custom_data.paint(painter, option.rect)
    def helpEvent(self, evt: QHelpEvent, view: QAbstractItemView, option: QStyleOptionViewItem, idx: QModelIndex) -> bool:
        custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
        if custom_data is None:
            return super().helpEvent(evt, view, option, idx)
        else:
            if evt.type() == QEvent.ToolTip:
                if custom_data.show_tooltip(evt):
                    return True
            return super().helpEvent(evt, view, option, idx)
    def sizeHint(self, option: QStyleOptionViewItem, idx: QModelIndex) -> QSize:
        custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
        if custom_data is None:
            return super().sizeHint(option, idx)
        else:
            default_size = super().sizeHint(option, idx)
            return custom_data.sizeHint(default_size)
class MyTreeView(QTreeView):
    """Base tree view for the wallet lists: adds a per-column copy menu,
    text filtering, an optional toolbar, and safe in-place editing that
    defers model refreshes while an editor is open."""
    # item data roles used by subclasses to attach auxiliary data
    ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
    ROLE_CUSTOM_PAINT = Qt.UserRole + 101
    ROLE_EDIT_KEY = Qt.UserRole + 102
    ROLE_FILTER_DATA = Qt.UserRole + 103
    # columns searched by the text filter; set by subclasses
    filter_columns: Iterable[int]
    def __init__(self, parent: 'ElectrumWindow', create_menu, *,
                 stretch_column=None, editable_columns=None):
        super().__init__(parent)
        self.parent = parent
        self.config = self.parent.config
        self.stretch_column = stretch_column
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(create_menu)
        self.setUniformRowHeights(True)
        # Control which columns are editable
        if editable_columns is None:
            editable_columns = []
        self.editable_columns = set(editable_columns)
        self.setItemDelegate(ElectrumItemDelegate(self))
        self.current_filter = ""
        self.is_editor_open = False
        self.setRootIsDecorated(False)  # remove left margin
        self.toolbar_shown = False
        # When figuring out the size of columns, Qt by default looks at
        # the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
        # This would be REALLY SLOW, and it's not perfect anyway.
        # So to speed the UI up considerably, set it to
        # only look at as many rows as currently visible.
        self.header().setResizeContentsPrecision(0)
        self._pending_update = False
        self._forced_update = False
    def set_editability(self, items):
        for idx, i in enumerate(items):
            i.setEditable(idx in self.editable_columns)
    def selected_in_column(self, column: int):
        """Return the selected indexes restricted to the given column."""
        items = self.selectionModel().selectedIndexes()
        return list(x for x in items if x.column() == column)
    def get_role_data_for_current_item(self, *, col, role) -> Any:
        idx = self.selectionModel().currentIndex()
        idx = idx.sibling(idx.row(), col)
        item = self.item_from_index(idx)
        if item:
            return item.data(role)
    def item_from_index(self, idx: QModelIndex) -> Optional[QStandardItem]:
        # resolve through the sort/filter proxy, if one is installed
        model = self.model()
        if isinstance(model, QSortFilterProxyModel):
            idx = model.mapToSource(idx)
            return model.sourceModel().itemFromIndex(idx)
        else:
            return model.itemFromIndex(idx)
    def original_model(self) -> QAbstractItemModel:
        """Return the underlying source model, bypassing any proxy."""
        model = self.model()
        if isinstance(model, QSortFilterProxyModel):
            return model.sourceModel()
        else:
            return model
    def set_current_idx(self, set_current: QPersistentModelIndex):
        if set_current:
            assert isinstance(set_current, QPersistentModelIndex)
            assert set_current.isValid()
            self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
    def update_headers(self, headers: Union[List[str], Dict[int, str]]):
        # headers is either a list of column names, or a dict: (col_idx->col_name)
        if not isinstance(headers, dict):  # convert to dict
            headers = dict(enumerate(headers))
        col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
        self.original_model().setHorizontalHeaderLabels(col_names)
        self.header().setStretchLastSection(False)
        for col_idx in headers:
            sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
            self.header().setSectionResizeMode(col_idx, sm)
    def keyPressEvent(self, event):
        if self.itemDelegate().opened:
            # an editor is open; let it handle the key
            return
        if event.key() in [Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter]:
            self.on_activated(self.selectionModel().currentIndex())
            return
        super().keyPressEvent(event)
    def on_activated(self, idx):
        # on 'enter' we show the menu
        pt = self.visualRect(idx).bottomLeft()
        pt.setX(50)
        self.customContextMenuRequested.emit(pt)
    def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
        """
        this is to prevent:
           edit: editing failed
        from inside qt
        """
        return super().edit(idx, trigger, event)
    def on_edited(self, idx: QModelIndex, edit_key, *, text: str) -> None:
        # subclasses persist the edited value here
        raise NotImplementedError()
    def should_hide(self, row):
        """
        row_num is for self.model(). So if there is a proxy, it is the row number
        in that!
        """
        return False
    def get_text_from_coordinate(self, row, col) -> str:
        idx = self.model().index(row, col)
        item = self.item_from_index(idx)
        return item.text()
    def get_role_data_from_coordinate(self, row, col, *, role) -> Any:
        idx = self.model().index(row, col)
        item = self.item_from_index(idx)
        role_data = item.data(role)
        return role_data
    def get_edit_key_from_coordinate(self, row, col) -> Any:
        # overriding this might allow avoiding storing duplicate data
        return self.get_role_data_from_coordinate(row, col, role=self.ROLE_EDIT_KEY)
    def get_filter_data_from_coordinate(self, row, col) -> str:
        # prefer explicit filter data; fall back to the cell's visible text
        filter_data = self.get_role_data_from_coordinate(row, col, role=self.ROLE_FILTER_DATA)
        if filter_data:
            return filter_data
        txt = self.get_text_from_coordinate(row, col)
        txt = txt.lower()
        return txt
    def hide_row(self, row_num):
        """
        row_num is for self.model(). So if there is a proxy, it is the row number
        in that!
        """
        should_hide = self.should_hide(row_num)
        if not self.current_filter and should_hide is None:
            # no filters at all, neither date nor search
            self.setRowHidden(row_num, QModelIndex(), False)
            return
        for column in self.filter_columns:
            filter_data = self.get_filter_data_from_coordinate(row_num, column)
            if self.current_filter in filter_data:
                # the filter matched, but the date filter might apply
                self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
                break
        else:
            # we did not find the filter in any columns, hide the item
            self.setRowHidden(row_num, QModelIndex(), True)
    def filter(self, p=None):
        """Set the (case-insensitive) text filter and re-evaluate rows."""
        if p is not None:
            p = p.lower()
            self.current_filter = p
        self.hide_rows()
    def hide_rows(self):
        for row in range(self.model().rowCount()):
            self.hide_row(row)
    def create_toolbar(self, config=None):
        """Build the (initially hidden) toolbar row above the list."""
        hbox = QHBoxLayout()
        buttons = self.get_toolbar_buttons()
        for b in buttons:
            b.setVisible(False)
            hbox.addWidget(b)
        hide_button = QPushButton('x')
        hide_button.setVisible(False)
        hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
        self.toolbar_buttons = buttons + (hide_button,)
        hbox.addStretch()
        hbox.addWidget(hide_button)
        return hbox
    def save_toolbar_state(self, state, config):
        pass  # implemented in subclasses
    def show_toolbar(self, state, config=None):
        if state == self.toolbar_shown:
            return
        self.toolbar_shown = state
        if config:
            self.save_toolbar_state(state, config)
        for b in self.toolbar_buttons:
            b.setVisible(state)
        if not state:
            self.on_hide_toolbar()
    def toggle_toolbar(self, config=None):
        self.show_toolbar(not self.toolbar_shown, config)
    def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
        """Add a 'Copy' submenu with one entry per named column."""
        cc = menu.addMenu(_("Copy"))
        for column in self.Columns:
            column_title = self.original_model().horizontalHeaderItem(column).text()
            if not column_title:
                continue
            item_col = self.item_from_index(idx.sibling(idx.row(), column))
            clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
            if clipboard_data is None:
                clipboard_data = item_col.text().strip()
            # bind loop variables as lambda defaults (late-binding pitfall)
            cc.addAction(column_title,
                         lambda text=clipboard_data, title=column_title:
                         self.place_text_on_clipboard(text, title=title))
        return cc
    def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
        self.parent.do_copy(text, title=title)
    def showEvent(self, e: 'QShowEvent'):
        super().showEvent(e)
        if e.isAccepted() and self._pending_update:
            # flush the refresh that was deferred while we were hidden
            self._forced_update = True
            self.update()
            self._forced_update = False
    def maybe_defer_update(self) -> bool:
        """Returns whether we should defer an update/refresh."""
        defer = (not self._forced_update
                 and (not self.isVisible() or self.is_editor_open))
        # side-effect: if we decide to defer update, the state will become stale:
        self._pending_update = defer
        return defer
class MySortModel(QSortFilterProxyModel):
    """Proxy model that sorts by a custom item data role, falling back to a
    numeric-aware comparison of the items' display text."""

    def __init__(self, parent, *, sort_role):
        super().__init__(parent)
        self._sort_role = sort_role

    def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
        item1 = self.sourceModel().itemFromIndex(source_left)
        item2 = self.sourceModel().itemFromIndex(source_right)
        data1 = item1.data(self._sort_role)
        data2 = item2.data(self._sort_role)
        if data1 is not None and data2 is not None:
            return data1 < data2
        v1 = item1.text()
        v2 = item2.text()
        try:
            # compare numerically when both cells parse as decimals
            return Decimal(v1) < Decimal(v2)
        except Exception:
            # fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; fall back to string comparison
            return v1 < v2
class ButtonsWidget(QWidget):
    """Mixin for input widgets that overlay small tool buttons (copy,
    paste, QR, ...) along their right edge; the host widget must call
    resizeButtons() from its resizeEvent."""
    def __init__(self):
        super(QWidget, self).__init__()
        self.buttons = []  # type: List[QToolButton]
    def resizeButtons(self):
        # lay the buttons out right-to-left just inside the widget frame
        frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
        x = self.rect().right() - frameWidth - 10
        y = self.rect().bottom() - frameWidth
        for button in self.buttons:
            sz = button.sizeHint()
            x -= sz.width()
            button.move(x, y - sz.height())
    def addButton(self, icon_name, on_click, tooltip):
        button = QToolButton(self)
        button.setIcon(read_QIcon(icon_name))
        button.setIconSize(QSize(25,25))
        button.setCursor(QCursor(Qt.PointingHandCursor))
        button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
        button.setVisible(True)
        button.setToolTip(tooltip)
        button.clicked.connect(on_click)
        self.buttons.append(button)
        return button
    def addCopyButton(self, app):
        self.app = app
        self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
    def on_copy(self):
        self.app.clipboard().setText(self.text())
        QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
    def addPasteButton(self, app):
        self.app = app
        # note: reuses the copy icon for the paste action
        self.addButton("copy.png", self.on_paste, _("Paste from clipboard"))
    def on_paste(self):
        self.setText(self.app.clipboard().text())
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
    """QLineEdit that can host overlay tool buttons (see ButtonsWidget)."""

    def __init__(self, text=None):
        QLineEdit.__init__(self, text)
        self.buttons = []

    def resizeEvent(self, e):
        result = QLineEdit.resizeEvent(self, e)
        # keep the overlay buttons glued to the right edge
        self.resizeButtons()
        return result
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
    """QPlainTextEdit that can host overlay tool buttons (see ButtonsWidget)."""
    def __init__(self, text=None):
        QPlainTextEdit.__init__(self, text)
        # alias the QLineEdit-style API so ButtonsWidget helpers
        # (on_copy/on_paste) work on this widget too
        self.setText = self.setPlainText
        self.text = self.toPlainText
        self.buttons = []
    def resizeEvent(self, e):
        o = QPlainTextEdit.resizeEvent(self, e)
        self.resizeButtons()
        return o
class PasswordLineEdit(QLineEdit):
    """Line edit in password echo mode that scrubs its buffer on clear()."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setEchoMode(QLineEdit.Password)

    def clear(self):
        # Overwrite the current contents with spaces before clearing.
        # This is really just a best-effort attempt to wipe the password
        # from memory.
        self.setText(" " * len(self.text()))
        super().clear()
class TaskThread(QThread):
    '''Thread that runs background tasks. Callbacks are guaranteed
    to happen in the context of its parent.'''
    class Task(NamedTuple):
        task: Callable
        cb_success: Optional[Callable]
        cb_done: Optional[Callable]
        cb_error: Optional[Callable]
    # (result_or_excinfo, cb_done, cb_success_or_cb_error); delivered via a
    # Qt signal so the callbacks run in the parent (GUI) thread
    doneSig = pyqtSignal(object, object, object)
    def __init__(self, parent, on_error=None):
        super(TaskThread, self).__init__(parent)
        self.on_error = on_error
        self.tasks = queue.Queue()
        self.doneSig.connect(self.on_done)
        self.start()
    def add(self, task, on_success=None, on_done=None, on_error=None):
        on_error = on_error or self.on_error
        self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
    def run(self):
        while True:
            task = self.tasks.get()   # type: TaskThread.Task
            if not task:
                # None is the sentinel pushed by stop()
                break
            try:
                result = task.task()
                self.doneSig.emit(result, task.cb_done, task.cb_success)
            except BaseException:
                # on failure, the error callback receives sys.exc_info()
                self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
    def on_done(self, result, cb_done, cb_result):
        # This runs in the parent's thread.
        if cb_done:
            cb_done()
        if cb_result:
            cb_result(result)
    def stop(self):
        self.tasks.put(None)
        self.exit()
        self.wait()
class ColorSchemeItem:
    """A foreground/background color pair that adapts to the global
    ColorScheme.dark_scheme flag."""
    def __init__(self, fg_color, bg_color):
        self.colors = (fg_color, bg_color)
    def _get_color(self, background):
        # index flips between fg and bg when the dark scheme is active
        return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
    def as_stylesheet(self, background=False):
        """Return a QWidget stylesheet snippet setting (background-)color."""
        css_prefix = "background-" if background else ""
        color = self._get_color(background)
        return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
    def as_color(self, background=False):
        color = self._get_color(background)
        return QColor(color)
class ColorScheme:
    """Palette of fg/bg color pairs; call update_from_widget() once at
    startup to detect a dark desktop theme."""
    # flipped to True (never back) by update_from_widget()
    dark_scheme = False
    GREEN = ColorSchemeItem("#117c11", "#8af296")
    YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
    RED = ColorSchemeItem("#7c1111", "#f18c8c")
    BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
    DEFAULT = ColorSchemeItem("black", "white")
    GRAY = ColorSchemeItem("gray", "gray")
    @staticmethod
    def has_dark_background(widget):
        # summed RGB below the midpoint => treat the background as dark
        brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
        return brightness < (255*3/2)
    @staticmethod
    def update_from_widget(widget, force_dark=False):
        if force_dark or ColorScheme.has_dark_background(widget):
            ColorScheme.dark_scheme = True
class AcceptFileDragDrop:
    """Mixin (for QWidget subclasses) that accepts drag&drop of local files
    whose names end with *file_type*; subclasses implement onFileAdded()."""
    def __init__(self, file_type=""):
        assert isinstance(self, QWidget)
        self.setAcceptDrops(True)
        self.file_type = file_type
    def validateEvent(self, event):
        # accept only if every dragged URL is a local file of the right type
        if not event.mimeData().hasUrls():
            event.ignore()
            return False
        for url in event.mimeData().urls():
            if not url.toLocalFile().endswith(self.file_type):
                event.ignore()
                return False
        event.accept()
        return True
    def dragEnterEvent(self, event):
        self.validateEvent(event)
    def dragMoveEvent(self, event):
        if self.validateEvent(event):
            event.setDropAction(Qt.CopyAction)
    def dropEvent(self, event):
        if self.validateEvent(event):
            for url in event.mimeData().urls():
                self.onFileAdded(url.toLocalFile())
    def onFileAdded(self, fn):
        # fn: local filesystem path of the dropped file
        raise NotImplementedError()
def import_meta_gui(electrum_window: 'ElectrumWindow', title, importer, on_success):
    """Ask the user for a JSON file and feed its path to *importer*.

    Shows a critical popup on FileImportFailed; otherwise reports success
    and calls on_success().  Does nothing if the dialog is cancelled.
    """
    filter_ = "JSON (*.json);;All files (*)"
    filename = getOpenFileName(
        parent=electrum_window,
        title=_("Open {} file").format(title),
        filter=filter_,
        config=electrum_window.config,
    )
    if not filename:
        return
    try:
        importer(filename)
    except FileImportFailed as e:
        electrum_window.show_critical(str(e))
    else:
        electrum_window.show_message(_("Your {} were successfully imported").format(title))
        on_success()
def export_meta_gui(electrum_window: 'ElectrumWindow', title, exporter):
    """Ask the user for a target JSON path and pass it to *exporter*.

    Shows a critical popup on FileExportFailed; otherwise reports where the
    data was exported.  Does nothing if the dialog is cancelled.
    """
    filter_ = "JSON (*.json);;All files (*)"
    filename = getSaveFileName(
        parent=electrum_window,
        title=_("Select file to save your {}").format(title),
        filename='electrum-plcu_{}.json'.format(title),
        filter=filter_,
        config=electrum_window.config,
    )
    if not filename:
        return
    try:
        exporter(filename)
    except FileExportFailed as e:
        electrum_window.show_critical(str(e))
    else:
        electrum_window.show_message(_("Your {0} were exported to '{1}'")
                                     .format(title, str(filename)))
def getOpenFileName(*, parent, title, filter="", config: 'SimpleConfig') -> Optional[str]:
    """Custom wrapper for getOpenFileName that remembers the path selected by the user.

    Returns the chosen path, or an empty value if the dialog was cancelled.
    """
    directory = config.get('io_dir', os.path.expanduser('~'))
    fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
    # persist the directory so the next dialog starts there
    if fileName and directory != os.path.dirname(fileName):
        config.set_key('io_dir', os.path.dirname(fileName), True)
    return fileName
def getSaveFileName(
        *,
        parent,
        title,
        filename,
        filter="",
        default_extension: str = None,
        default_filter: str = None,
        config: 'SimpleConfig',
) -> Optional[str]:
    """Custom wrapper for getSaveFileName that remembers the path selected by the user.

    Returns the chosen path, or None if the dialog was cancelled.
    """
    directory = config.get('io_dir', os.path.expanduser('~'))
    path = os.path.join(directory, filename)
    file_dialog = QFileDialog(parent, title, path, filter)
    file_dialog.setAcceptMode(QFileDialog.AcceptSave)
    if default_extension:
        # note: on MacOS, the selected filter's first extension seems to have priority over this...
        file_dialog.setDefaultSuffix(default_extension)
    if default_filter:
        assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
        file_dialog.selectNameFilter(default_filter)
    if file_dialog.exec() != QDialog.Accepted:
        return None
    selected_path = file_dialog.selectedFiles()[0]
    # persist the directory so the next dialog starts there
    if selected_path and directory != os.path.dirname(selected_path):
        config.set_key('io_dir', os.path.dirname(selected_path), True)
    return selected_path
def icon_path(icon_basename):
    """Return the full resource path of a bundled GUI icon file."""
    return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
    """Load a QIcon from the bundled icons directory (cached, as icons are
    reused all over the GUI)."""
    return QIcon(icon_path(icon_basename))
class IconLabel(QWidget):
    """Composite widget: selectable text label with a small icon shown to
    its right."""
    IconSize = QSize(16, 16)
    HorizontalSpacing = 2
    def __init__(self, *, text='', final_stretch=True):
        super(QWidget, self).__init__()
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        self.icon = QLabel()
        self.label = QLabel(text)
        self.label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        layout.addWidget(self.label)
        layout.addSpacing(self.HorizontalSpacing)
        layout.addWidget(self.icon)
        if final_stretch:
            layout.addStretch()
    def setText(self, text):
        self.label.setText(text)
    def setIcon(self, icon):
        self.icon.setPixmap(icon.pixmap(self.IconSize))
        self.icon.repaint()  # macOS hack for #6269
def get_default_language():
    """Return the system locale name if it is one we ship, else a fallback.

    NOTE(review): the fallback 'en_UK' is unusual (QLocale uses 'en_GB');
    presumably 'en_UK' is a key in the project's `languages` mapping --
    confirm before changing.
    """
    name = QLocale.system().name()
    return name if name in languages else 'en_UK'
def char_width_in_lineedit() -> int:
    """Average character width (in pixels) of the default QLineEdit font."""
    width = QFontMetrics(QLineEdit().font()).averageCharWidth()
    # 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
    return max(9, width)
def webopen(url: str):
    """Open *url* in the user's web browser.

    On Linux AppImage builds, webbrowser.open can fail because the bundled
    libdbus shadows the system one; so we fork a child process, unset
    LD_LIBRARY_PATH there, and open the URL from the child.  See #5425.
    """
    if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
        if os.fork() == 0:
            # Child process: make sure we always _exit, even if
            # webbrowser.open raises, so the forked copy of the GUI
            # process never keeps running.
            try:
                # pop with a default: the variable may be unset (the old
                # `del` raised KeyError in that case)
                os.environ.pop('LD_LIBRARY_PATH', None)
                webbrowser.open(url)
            finally:
                os._exit(0)
    else:
        webbrowser.open(url)
class FixedAspectRatioLayout(QLayout):
    """QLayout that constrains its items to a fixed width:height aspect
    ratio, positioning each item within the leftover space according to
    its alignment flags."""
    def __init__(self, parent: QWidget = None, aspect_ratio: float = 1.0):
        super().__init__(parent)
        self.aspect_ratio = aspect_ratio
        self.items: List[QLayoutItem] = []
    def set_aspect_ratio(self, aspect_ratio: float = 1.0):
        self.aspect_ratio = aspect_ratio
        self.update()
    def addItem(self, item: QLayoutItem):
        self.items.append(item)
    def count(self) -> int:
        return len(self.items)
    def itemAt(self, index: int) -> QLayoutItem:
        if index >= len(self.items):
            return None
        return self.items[index]
    def takeAt(self, index: int) -> QLayoutItem:
        if index >= len(self.items):
            return None
        return self.items.pop(index)
    def _get_contents_margins_size(self) -> QSize:
        margins = self.contentsMargins()
        return QSize(margins.left() + margins.right(), margins.top() + margins.bottom())
    def setGeometry(self, rect: QRect):
        super().setGeometry(rect)
        if not self.items:
            return
        contents = self.contentsRect()
        if contents.height() > 0:
            c_aratio = contents.width() / contents.height()
        else:
            c_aratio = 1
        s_aratio = self.aspect_ratio
        # largest rect with the target aspect ratio that fits in `contents`
        item_rect = QRect(QPoint(0, 0), QSize(
            contents.width() if c_aratio < s_aratio else int(contents.height() * s_aratio),
            contents.height() if c_aratio > s_aratio else int(contents.width() / s_aratio)
        ))
        content_margins = self.contentsMargins()
        free_space = contents.size() - item_rect.size()
        for item in self.items:
            # horizontal placement: left / right / centered (default)
            if free_space.width() > 0 and not item.alignment() & Qt.AlignLeft:
                if item.alignment() & Qt.AlignRight:
                    item_rect.moveRight(contents.width() + content_margins.right())
                else:
                    item_rect.moveLeft(content_margins.left() + (free_space.width() // 2))
            else:
                item_rect.moveLeft(content_margins.left())
            # vertical placement: top / bottom / centered (default)
            if free_space.height() > 0 and not item.alignment() & Qt.AlignTop:
                if item.alignment() & Qt.AlignBottom:
                    item_rect.moveBottom(contents.height() + content_margins.bottom())
                else:
                    item_rect.moveTop(content_margins.top() + (free_space.height() // 2))
            else:
                item_rect.moveTop(content_margins.top())
            item.widget().setGeometry(item_rect)
    def sizeHint(self) -> QSize:
        result = QSize()
        for item in self.items:
            result = result.expandedTo(item.sizeHint())
        return self._get_contents_margins_size() + result
    def minimumSize(self) -> QSize:
        result = QSize()
        for item in self.items:
            result = result.expandedTo(item.minimumSize())
        return self._get_contents_margins_size() + result
    def expandingDirections(self) -> Qt.Orientations:
        return Qt.Horizontal | Qt.Vertical
def QColorLerp(a: QColor, b: QColor, t: float):
    """
    Blends two QColors. t=0 returns a. t=1 returns b. t=0.5 returns evenly mixed.
    """
    # clamp t into [0, 1]
    t = min(max(t, 0.0), 1.0)

    def mix(x: int, y: int) -> int:
        return int(x * (1.0 - t) + y * t)

    return QColor(
        mix(a.red(), b.red()),
        mix(a.green(), b.green()),
        mix(a.blue(), b.blue()),
        mix(a.alpha(), b.alpha()),
    )
class ImageGraphicsEffect(QObject):
    """
    Applies a QGraphicsEffect to a QImage
    """
    def __init__(self, parent: QObject, effect: QGraphicsEffect):
        super().__init__(parent)
        assert effect, 'effect must be set'
        self.effect = effect
        # off-screen scene holding a single pixmap item carrying the effect
        self.graphics_scene = QGraphicsScene()
        self.graphics_item = QGraphicsPixmapItem()
        self.graphics_item.setGraphicsEffect(effect)
        self.graphics_scene.addItem(self.graphics_item)
    def apply(self, image: QImage):
        """Render *image* through the effect and return the result as a new
        ARGB32 QImage (the input is not modified)."""
        assert image, 'image must be set'
        result = QImage(image.size(), QImage.Format_ARGB32)
        result.fill(Qt.transparent)
        painter = QPainter(result)
        self.graphics_item.setPixmap(QPixmap.fromImage(image))
        self.graphics_scene.render(painter)
        # drop the reference to the source pixmap
        self.graphics_item.setPixmap(QPixmap())
        return result
if __name__ == "__main__":
    # Manual smoke test: show a WaitingDialog whose task sleeps one second
    # in the background, then pop a message box when it completes.
    app = QApplication([])
    t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
    t.start()
    app.exec_()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import account_invoice
from . import crm_lead
from . import crm_team
from . import res_users
from . import sale_order
|
#! /usr/bin/env python3
"""Unit tests for ulist
This test case can be executed individually, or with all other test cases
through testsuite_framework.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from smartcard.ulist import ulist
class C(ulist):
    """ulist subclass whose add/remove notification hooks are deliberate no-ops."""

    def __onadditem__(self, item):
        """Called when an item is added; intentionally does nothing."""

    def __onremoveitem__(self, item):
        """Called when an item is removed; intentionally does nothing."""
class testcase_ulist(unittest.TestCase):
    """Test smartcard.ulist."""

    def testcase_ulist_init(self):
        """The constructor drops duplicates while keeping first-seen order."""
        coll = C([1, 2, 3, 3, 4, 5, 5])
        self.assertEqual([1, 2, 3, 4, 5], coll)
        coll = C(['one', 'two', 'three', 'one'])
        self.assertEqual(['one', 'two', 'three'], coll)

    def testcase_ulist_add(self):
        """l = l + other keeps the result duplicate-free."""
        base = [1, 2, 3]
        coll = C(base)
        self.assertEqual(base, coll)
        coll = coll + []
        self.assertEqual(base, coll)
        coll = coll + 4
        self.assertEqual(base + [4], coll)
        coll = coll + 4
        self.assertEqual(base + [4], coll)
        coll = coll + 'word'
        self.assertEqual(base + [4] + ['word'], coll)
        base = ['one', 'two', 'three']
        coll = C(base)
        self.assertEqual(base, coll)
        coll = coll + ['four', 'five']
        self.assertEqual(base + ['four', 'five'], coll)

    def testcase_ulist_iadd(self):
        """l += other keeps the result duplicate-free."""
        base = [1, 2, 3]
        coll = C(base)
        self.assertEqual(base, coll)
        coll += []
        self.assertEqual(base, coll)
        coll += 4
        self.assertEqual(base + [4], coll)
        coll += 4
        self.assertEqual(base + [4], coll)
        coll += [4, 3, 2, 1]
        self.assertEqual(base + [4], coll)
        coll += 'word'
        self.assertEqual(base + [4] + ['word'], coll)
        base = ['one', 'two', 'three']
        coll = C(base)
        self.assertEqual(base, coll)
        coll += ['four', 'five']
        self.assertEqual(base + ['four', 'five'], coll)

    def testcase_ulist_radd(self):
        """l = other + l must not mutate the ulist operand."""
        base = [1, 2, 3]
        coll = C(base)
        self.assertEqual(base, coll)
        merged = [] + coll
        self.assertEqual(base, merged)
        merged = [3] + coll
        self.assertEqual(base, coll)
        self.assertEqual(base, merged)
        merged = [3, 3, 4, 4] + coll
        self.assertEqual(base, coll)
        self.assertEqual(base + [4], merged)
        merged = [4] + ['word'] + coll
        self.assertEqual(base, coll)
        self.assertEqual(base + [4] + ['word'], merged)

    def testcase_ulist_append(self):
        """append() silently ignores values already present."""
        base = [1, 2, 3]
        coll = C(base)
        coll.append(4)
        self.assertEqual(base + [4], coll)
        coll.append(4)
        self.assertEqual(base + [4], coll)
        coll.append('word')
        self.assertEqual(base + [4] + ['word'], coll)

    def testcase_ulist_insert(self):
        """insert() silently ignores values already present."""
        base = [1, 2, 3]
        coll = C(base)
        coll.insert(0, 0)
        self.assertEqual([0] + base, coll)
        coll.insert(1, 0)
        self.assertEqual([0] + base, coll)

    def testcase_ulist_pop(self):
        """pop() removes by position like a plain list."""
        coll = C([1, 2, 3])
        coll.pop()
        self.assertEqual(coll, [1, 2])
        coll.pop(1)
        self.assertEqual(coll, [1])

    def testcase_ulist_remove(self):
        """remove() removes by value like a plain list."""
        coll = C([1, 2, 3])
        coll.remove(2)
        self.assertEqual(coll, [1, 3])
        coll.remove(1)
        self.assertEqual(coll, [3])
def suite():
    """Build and return the TestSuite for this module."""
    # unittest.makeSuite() is deprecated and removed in Python 3.13;
    # use the TestLoader API instead.
    suite1 = unittest.defaultTestLoader.loadTestsFromTestCase(testcase_ulist)
    # NOTE: the original passed (suite1) — not a tuple — to TestSuite();
    # the trailing comma makes it an actual one-element tuple.
    return unittest.TestSuite((suite1,))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
ngridx = 100
ngridy = 200

# define the hyperparameter space
epsilon_min = 1
epsilon_max = 10
C_min = 1
C_max = 10
epsilon = list(np.arange(epsilon_min, epsilon_max, 1))
C = list(np.arange(C_min, C_max, 1))

# calculate cv_score for each hyperparameter combination
# NOTE(review): grid_search is defined further down in this file; executed
# top-to-bottom as a script this raises NameError — confirm intended run order.
cv_scores, c_settings, epsilon_settings = grid_search(epsilon, C)

# define plot dimensions
x_plot = c_settings
y_plot = epsilon_settings
z_plot = cv_scores

# define figure
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 8))

# -----------------------
# Interpolation on a grid
# -----------------------
# A contour plot of irregularly spaced data coordinates
# via interpolation on a grid.

# Create grid values first.
xi = np.linspace(min(x_plot) - 1, max(x_plot) + 1, ngridx)
yi = np.linspace(min(y_plot) - 1, max(y_plot) + 1, ngridy)

# Perform linear interpolation of the data (x_plot, y_plot)
# on a grid defined by (xi, yi)
triang = tri.Triangulation(x_plot, y_plot)
interpolator = tri.LinearTriInterpolator(triang, z_plot)
Xi, Yi = np.meshgrid(xi, yi)
zi = interpolator(Xi, Yi)

# Note that scipy.interpolate provides means to interpolate data on a grid
# as well. The following would be an alternative to the four lines above:
ax1.contour(xi, yi, zi, levels=14, linewidths=0.5, colors='k')
cntr1 = ax1.contourf(xi, yi, zi, levels=14, cmap="RdBu_r")
fig.colorbar(cntr1, ax=ax1)
# BUG FIX: the original referenced undefined names x, y and npts below;
# use the plotted data and its length instead.
ax1.plot(x_plot, y_plot, 'ko', ms=3)
ax1.set(xlim=(min(x_plot), max(x_plot)), ylim=(min(y_plot), max(y_plot)))
ax1.set_title('grid and contour (%d points, %d grid points)' %
              (len(x_plot), ngridx * ngridy))
ax1.set_xlabel('C')
ax1.set_ylabel('Epsilon')

# ----------
# Tricontour
# ----------
# Directly supply the unordered, irregularly spaced coordinates
# to tricontour.
epsilon = list(np.arange(1, 10, 1))
C = list(np.arange(5, 10, 1))
cv_scores, c_settings, epsilon_settings = grid_search(epsilon, C)
x_plot2 = c_settings
y_plot2 = epsilon_settings
z_plot2 = cv_scores
# BUG FIX: the original referenced undefined names x, y, z here.
ax2.tricontour(x_plot2, y_plot2, z_plot2, levels=100, linewidths=0.5, colors='k')
cntr2 = ax2.tricontourf(x_plot2, y_plot2, z_plot2, levels=14, cmap="RdBu_r")
fig.colorbar(cntr2, ax=ax2)
ax2.plot(x_plot2, y_plot2, 'ko', ms=3)
ax2.set(xlim=(min(x_plot2), max(x_plot2)), ylim=(min(y_plot2), max(y_plot2)))
ax2.set_title('tricontour (%d points)' % len(x_plot2))
ax2.set_xlabel('C')
ax2.set_ylabel('Epsilon')

plt.subplots_adjust(hspace=0.5)
plt.show()
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import numpy as np
def grid_search(epsilon, C, X=None, y=None):
    """Cross-validate an SVR pipeline over every (epsilon, C) combination.

    Parameters
    ----------
    epsilon, C : sequences of hyperparameter values to sweep.
    X, y : optional training data. When omitted, fall back to module-level
        globals, which is what the original implicitly did — TODO confirm
        those globals are defined elsewhere in the project.

    Returns
    -------
    (cv_scores, c_settings, epsilon_settings) : parallel lists with one entry
    per combination; each score is the mean 10-fold neg-MSE, rounded to 2 dp.
    """
    # BUG FIX: cross_val_score was used but never imported.
    from sklearn.model_selection import cross_val_score
    if X is None:
        X = globals()['X']
    if y is None:
        y = globals()['y']
    cv_scores = []
    c_settings = []
    epsilon_settings = []
    for eps in epsilon:
        for c in C:
            # (removed: the original created an unused matplotlib subplot on
            # every iteration, a leftover side effect)
            pipeline = make_pipeline(StandardScaler(), SVR(C=c, epsilon=eps))
            pipeline.fit(X[:, np.newaxis], y)
            # Evaluate the model using 10-fold cross-validation
            scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring="neg_mean_squared_error", cv=10)
            cv_scores.append(round(scores.mean(), 2))
            c_settings.append(c)
            epsilon_settings.append(eps)
    return cv_scores, c_settings, epsilon_settings
# plt.plot(X_test, true_fun(X_test), label="True function")
# plt.scatter(c_settings, cv_scores, edgecolor='b', s=20, label="Samples")
# plt.xlabel("x")
# plt.ylabel("y")
# plt.xlim((0, 0.2))
# plt.ylim((-2, 2))
# plt.legend(loc="best")
# plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
# degrees[i], -scores.mean(), scores.std()))
# plt.show()
# plt.savefig("polynomial_regression_example.png", dpi=150)
|
from notetool.secret.secret import (SecretManage, read_secret, set_secret_path,
write_secret)
from notetool.tool.build import get_version, version_add
from .compress import decompress
from .log import log, logger
from .path import delete_file, exists_file, path_parse, rename
|
# -*- coding: utf-8 -*-
import json
import os
import subprocess
import sys
from distutils.spawn import find_executable
import click
import frappe
from frappe.commands import get_site, pass_context
from frappe.exceptions import SiteNotSpecifiedError
from frappe.utils import get_bench_path, update_progress_bar, cint
@click.command('build')
@click.option('--app', help='Build assets for app')
@click.option('--hard-link', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--make-copy', is_flag=True, default=False, help='[DEPRECATED] Copy the files instead of symlinking')
@click.option('--restore', is_flag=True, default=False, help='[DEPRECATED] Copy the files instead of symlinking with force')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', is_flag=True, default=False, help='Force build assets instead of downloading available')
def build(app=None, hard_link=False, make_copy=False, restore=False, verbose=False, force=False):
	"Minify + concatenate JS and CSS files, build translations"
	# frappe.build is referenced below but was never imported here
	# (the sibling `watch` command imports it explicitly) — make it explicit.
	import frappe.build
	frappe.init('')
	# don't minify in developer_mode for faster builds
	no_compress = frappe.local.conf.developer_mode or False

	# don't try downloading assets if force used, app specified or running via CI
	if not (force or app or os.environ.get('CI')):
		# skip building frappe if assets exist remotely
		skip_frappe = frappe.build.download_frappe_assets(verbose=verbose)
	else:
		skip_frappe = False

	# map the deprecated flags onto --hard-link for backward compatibility
	if make_copy or restore:
		hard_link = make_copy or restore
		click.secho(
			"bench build: --make-copy and --restore options are deprecated in favour of --hard-link",
			fg="yellow",
		)

	frappe.build.bundle(
		skip_frappe=skip_frappe,
		no_compress=no_compress,
		hard_link=hard_link,
		verbose=verbose,
		app=app,
	)
@click.command('watch')
def watch():
	"Watch and concatenate JS and CSS files as and when they change"
	import frappe.build
	frappe.init('')
	# Blocks and rebuilds bundles on change; True presumably enables
	# no-compress/dev mode — confirm against frappe.build.watch's signature.
	frappe.build.watch(True)
@click.command('clear-cache')
@pass_context
def clear_cache(context):
	"Clear cache, doctype cache and defaults"
	import frappe.sessions
	import frappe.website.render
	from frappe.desk.notifications import clear_notifications
	# Guard first: no sites selected is an error either way.
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.connect(current_site)
			frappe.clear_cache()
			clear_notifications()
			frappe.website.render.clear_cache()
		finally:
			# always release the site connection, even on failure
			frappe.destroy()
@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
	"Clear website cache"
	import frappe.website.render
	# Guard first: no sites selected is an error either way.
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			frappe.website.render.clear_cache()
		finally:
			frappe.destroy()
@click.command('destroy-all-sessions')
@click.option('--reason')
@pass_context
def destroy_all_sessions(context, reason=None):
	"Clear sessions of all users (logs them out)"
	import frappe.sessions
	# Guard first: no sites selected is an error either way.
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			frappe.sessions.clear_all_sessions(reason)
			frappe.db.commit()
		finally:
			frappe.destroy()
@click.command('show-config')
@click.option("--format", "-f", type=click.Choice(["text", "json"]), default="text")
@pass_context
def show_config(context, format):
	"Print configuration file to STDOUT in specified format"
	# (typo fix in help text: "speified" -> "specified")
	if not context.sites:
		raise SiteNotSpecifiedError

	sites_config = {}
	sites_path = os.getcwd()

	from frappe.utils.commands import render_table

	def transform_config(config, prefix=None):
		# Flatten nested config dicts into [["a.b.c", value], ...] rows.
		prefix = f"{prefix}." if prefix else ""
		site_config = []

		for conf, value in config.items():
			if isinstance(value, dict):
				site_config += transform_config(value, prefix=f"{prefix}{conf}")
			else:
				# JSON-encode lists so they render as one table cell
				log_value = json.dumps(value) if isinstance(value, list) else value
				site_config += [[f"{prefix}{conf}", log_value]]

		return site_config

	for site in context.sites:
		frappe.init(site)

		if len(context.sites) != 1 and format == "text":
			if context.sites.index(site) != 0:
				click.echo()
			click.secho(f"Site {site}", fg="yellow")

		configuration = frappe.get_site_config(sites_path=sites_path, site_path=site)

		if format == "text":
			data = transform_config(configuration)
			data.insert(0, ['Config', 'Value'])
			render_table(data)

		if format == "json":
			sites_config[site] = configuration

		frappe.destroy()

	if format == "json":
		click.echo(frappe.as_json(sites_config))
@click.command('reset-perms')
@pass_context
def reset_perms(context):
	"Reset permissions for all doctypes"
	# Alias the helper so it does not shadow this command's own name.
	from frappe.permissions import reset_perms as _reset_perms
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			doctypes = frappe.db.sql_list("""select name from `tabDocType`
				where istable=0 and custom=0""")
			for doctype_name in doctypes:
				frappe.clear_cache(doctype=doctype_name)
				_reset_perms(doctype_name)
		finally:
			frappe.destroy()
@click.command('execute')
@click.argument('method')
@click.option('--args')
@click.option('--kwargs')
@click.option('--profile', is_flag=True, default=False)
@pass_context
def execute(context, method, args=None, kwargs=None, profile=False):
	"Execute a function"
	# Runs `method` (a dotted-path callable) on every selected site,
	# optionally under cProfile, then JSON-prints any truthy result.
	# SECURITY: --args/--kwargs are eval()'d — only safe as a trusted CLI tool.
	for site in context.sites:
		ret = ""
		try:
			frappe.init(site=site)
			frappe.connect()

			if args:
				try:
					args = eval(args)
				except NameError:
					# a bare word such as `foo` becomes a single string argument
					args = [args]
			else:
				args = ()

			if kwargs:
				kwargs = eval(kwargs)
			else:
				kwargs = {}

			if profile:
				import cProfile
				pr = cProfile.Profile()
				pr.enable()

			try:
				ret = frappe.get_attr(method)(*args, **kwargs)
			except Exception:
				# fall back to evaluating the call expression directly
				ret = frappe.safe_eval(method + "(*args, **kwargs)", eval_globals=globals(), eval_locals=locals())

			if profile:
				import pstats
				from six import StringIO
				pr.disable()
				s = StringIO()
				# show the top 50% of entries by cumulative time
				pstats.Stats(pr, stream=s).sort_stats('cumulative').print_stats(.5)
				print(s.getvalue())

			if frappe.db:
				frappe.db.commit()
		finally:
			frappe.destroy()
		if ret:
			from frappe.utils.response import json_handler
			print(json.dumps(ret, default=json_handler))

	if not context.sites:
		raise SiteNotSpecifiedError
@click.command('add-to-email-queue')
@click.argument('email-path')
@pass_context
def add_to_email_queue(context, email_path):
	"Add an email to the Email Queue"
	site = get_site(context)

	# email_path is a directory of JSON files, each holding sendmail kwargs;
	# anything that is not a directory is silently ignored.
	if os.path.isdir(email_path):
		with frappe.init_site(site):
			frappe.connect()
			for email in os.listdir(email_path):
				with open(os.path.join(email_path, email)) as email_data:
					kwargs = json.load(email_data)
					# queue instead of sending immediately
					kwargs['delayed'] = True
					frappe.sendmail(**kwargs)
					frappe.db.commit()
@click.command('export-doc')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def export_doc(context, doctype, docname):
	"Export a single document to csv"
	import frappe.modules
	# Guard first: no sites selected is an error either way.
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			frappe.modules.export_doc(doctype, docname)
		finally:
			frappe.destroy()
@click.command('export-json')
@click.argument('doctype')
@click.argument('path')
@click.option('--name', help='Export only one document')
@pass_context
def export_json(context, doctype, path, name=None):
	"Export doclist as json to the given path, use '-' as name for Singles."
	# Alias the helper so it does not shadow this command's own name.
	from frappe.core.doctype.data_import.data_import import export_json as _export_json
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			_export_json(doctype, path, name=name)
		finally:
			frappe.destroy()
@click.command('export-csv')
@click.argument('doctype')
@click.argument('path')
@pass_context
def export_csv(context, doctype, path):
	"Export data import template with data for DocType"
	# Alias the helper so it does not shadow this command's own name.
	from frappe.core.doctype.data_import.data_import import export_csv as _export_csv
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			_export_csv(doctype, path)
		finally:
			frappe.destroy()
@click.command('export-fixtures')
@click.option('--app', default=None, help='Export fixtures of a specific app')
@pass_context
def export_fixtures(context, app=None):
	"Export fixtures"
	# Alias the helper so it does not shadow this command's own name.
	from frappe.utils.fixtures import export_fixtures as _export_fixtures
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			_export_fixtures(app=app)
		finally:
			frappe.destroy()
@click.command('import-doc')
@click.argument('path')
@pass_context
def import_doc(context, path, force=False):
	"Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported"
	# Alias the helper so it does not shadow this command's own name.
	# NOTE: `force` is accepted for backward compatibility but unused.
	from frappe.core.doctype.data_import.data_import import import_doc as _import_doc
	# fall back to the parent directory before giving up
	if not os.path.exists(path):
		path = os.path.join('..', path)
	if not os.path.exists(path):
		print('Invalid path {0}'.format(path))
		sys.exit(1)
	if not context.sites:
		raise SiteNotSpecifiedError
	for current_site in context.sites:
		try:
			frappe.init(site=current_site)
			frappe.connect()
			_import_doc(path)
		finally:
			frappe.destroy()
@click.command('import-csv')
@click.argument('path')
@click.option('--only-insert', default=False, is_flag=True, help='Do not overwrite existing records')
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--ignore-encoding-errors', default=False, is_flag=True, help='Ignore encoding errors while coverting to unicode')
@click.option('--no-email', default=True, is_flag=True, help='Send email if applicable')
@pass_context
def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False, no_email=True):
	"Import CSV using data import"
	# Legacy importer; `data-import` below is the current implementation.
	from frappe.core.doctype.data_import_legacy import importer
	from frappe.utils.csvutils import read_csv_content
	site = get_site(context)

	# fall back to the parent directory before giving up (mirrors import-doc)
	if not os.path.exists(path):
		path = os.path.join('..', path)
	if not os.path.exists(path):
		print('Invalid path {0}'.format(path))
		sys.exit(1)

	with open(path, 'r') as csvfile:
		content = read_csv_content(csvfile.read())

	frappe.init(site=site)
	frappe.connect()

	try:
		importer.upload(content, submit_after_import=submit_after_import, no_email=no_email,
			ignore_encoding_errors=ignore_encoding_errors, overwrite=not only_insert,
			via_console=True)
		frappe.db.commit()
	except Exception:
		# best-effort CLI: print the traceback rather than aborting cleanup
		print(frappe.get_traceback())

	frappe.destroy()
@click.command('data-import')
@click.option('--file', 'file_path', type=click.Path(), required=True, help="Path to import file (.csv, .xlsx)")
@click.option('--doctype', type=str, required=True)
@click.option('--type', 'import_type', type=click.Choice(['Insert', 'Update'], case_sensitive=False), default='Insert', help="Insert New Records or Update Existing Records")
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--mute-emails', default=True, is_flag=True, help='Mute emails during import')
@pass_context
def data_import(context, file_path, doctype, import_type=None, submit_after_import=False, mute_emails=True):
	"Import documents in bulk from CSV or XLSX using data import"
	from frappe.core.doctype.data_import.data_import import import_file
	site = get_site(context)

	# NOTE(review): mute_emails is accepted but never forwarded to
	# import_file — confirm whether that is intentional.
	frappe.init(site=site)
	frappe.connect()
	import_file(doctype, file_path, import_type, submit_after_import, console=True)
	frappe.destroy()
@click.command('bulk-rename')
@click.argument('doctype')
@click.argument('path')
@pass_context
def bulk_rename(context, doctype, path):
	"Rename multiple records via CSV file"
	from frappe.model.rename_doc import bulk_rename
	from frappe.utils.csvutils import read_csv_content
	site = get_site(context)

	# each CSV row is expected to describe one rename (old name, new name)
	with open(path, 'r') as csvfile:
		rows = read_csv_content(csvfile.read())

	frappe.init(site=site)
	frappe.connect()

	bulk_rename(doctype, rows, via_console = True)

	frappe.destroy()
@click.command('mariadb')
@pass_context
def mariadb(context):
	"""
	Enter into mariadb console for a given site.
	"""
	import os

	site = get_site(context)
	if not site:
		raise SiteNotSpecifiedError
	frappe.init(site=site)

	# This is assuming you're within the bench instance.
	mysql = find_executable('mysql')
	if not mysql:
		# fail clearly instead of letting os.execv(None, ...) raise TypeError
		click.secho('mysql executable not found in PATH', fg='red')
		sys.exit(1)
	# NOTE: the password passed via argv is visible in the process list;
	# consider MYSQL_PWD or a defaults file.
	os.execv(mysql, [
		mysql,
		'-u', frappe.conf.db_name,
		'-p'+frappe.conf.db_password,
		frappe.conf.db_name,
		'-h', frappe.conf.db_host or "localhost",
		'--pager=less -SFX',
		'--safe-updates',
		"-A"])
@click.command('postgres')
@pass_context
def postgres(context):
	"""
	Enter into postgres console for a given site.
	"""
	site = get_site(context)
	# guard against a missing site, consistent with the mariadb command
	if not site:
		raise SiteNotSpecifiedError
	frappe.init(site=site)
	# This is assuming you're within the bench instance.
	psql = find_executable('psql')
	if not psql:
		# fail clearly instead of subprocess.run([None, ...]) raising TypeError
		click.secho('psql executable not found in PATH', fg='red')
		sys.exit(1)
	subprocess.run([psql, '-d', frappe.conf.db_name])
@click.command('jupyter')
@pass_context
def jupyter(context):
	# BUG FIX: subprocess.check_output returns one big string; iterating it
	# directly yields single characters, so 'jupyter' was never found and
	# jupyter got reinstalled on every invocation. Split into lines first.
	installed_packages = (r.split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'], encoding='utf8').splitlines())

	if 'jupyter' not in installed_packages:
		subprocess.check_output([sys.executable, '-m', 'pip', 'install', 'jupyter'])

	site = get_site(context)
	frappe.init(site=site)

	jupyter_notebooks_path = os.path.abspath(frappe.get_site_path('jupyter_notebooks'))
	sites_path = os.path.abspath(frappe.get_site_path('..'))

	# create the notebooks folder on first use
	try:
		os.stat(jupyter_notebooks_path)
	except OSError:
		print('Creating folder to keep jupyter notebooks at {}'.format(jupyter_notebooks_path))
		os.mkdir(jupyter_notebooks_path)
	bin_path = os.path.abspath('../env/bin')
	print('''
Starting Jupyter notebook
Run the following in your first cell to connect notebook to frappe
```
import frappe
frappe.init(site='{site}', sites_path='{sites_path}')
frappe.connect()
frappe.local.lang = frappe.db.get_default('lang')
frappe.db.connect()
```
'''.format(site=site, sites_path=sites_path))
	# replace this process with the jupyter server from the bench virtualenv
	os.execv('{0}/jupyter'.format(bin_path), [
		'{0}/jupyter'.format(bin_path),
		'notebook',
		jupyter_notebooks_path,
	])
@click.command('console')
@pass_context
def console(context):
	"Start ipython console for a site"
	import warnings

	site = get_site(context)
	frappe.init(site=site)
	frappe.connect()
	frappe.local.lang = frappe.db.get_default("lang")

	import IPython
	all_apps = frappe.get_installed_apps()
	failed_to_import = []

	# BUG FIX: iterate over a copy — the original removed elements from
	# all_apps inside a loop over all_apps, which skips the entry after
	# every failed import.
	for app in list(all_apps):
		try:
			locals()[app] = __import__(app)
		except ModuleNotFoundError:
			failed_to_import.append(app)
			all_apps.remove(app)

	print("Apps in this namespace:\n{}".format(", ".join(all_apps)))
	if failed_to_import:
		print("\nFailed to import:\n{}".format(", ".join(failed_to_import)))

	warnings.simplefilter('ignore')
	IPython.embed(display_banner="", header="", colors="neutral")
@click.command('run-tests')
@click.option('--app', help="For App")
@click.option('--doctype', help="For DocType")
@click.option('--doctype-list-path', help="Path to .txt file for list of doctypes. Example erpnext/tests/server/agriculture.txt")
@click.option('--test', multiple=True, help="Specific test")
@click.option('--ui-tests', is_flag=True, default=False, help="Run UI Tests")
@click.option('--module', help="Run tests in a module")
@click.option('--profile', is_flag=True, default=False)
@click.option('--coverage', is_flag=True, default=False)
@click.option('--skip-test-records', is_flag=True, default=False, help="Don't create test records")
@click.option('--skip-before-tests', is_flag=True, default=False, help="Don't run before tests hook")
@click.option('--junit-xml-output', help="Destination file path for junit xml report")
@click.option('--failfast', is_flag=True, default=False, help="Stop the test run on the first error or failure")
@pass_context
def run_tests(context, app=None, module=None, doctype=None, test=(), profile=False,
	coverage=False, junit_xml_output=False, ui_tests = False, doctype_list_path=None,
	skip_test_records=False, skip_before_tests=False, failfast=False):
	"Run tests"
	import frappe.test_runner

	tests = test
	site = get_site(context)

	# tests must be explicitly enabled per site (or be running under CI)
	allow_tests = frappe.get_conf(site).allow_tests

	if not (allow_tests or os.environ.get('CI')):
		click.secho('Testing is disabled for the site!', bold=True)
		click.secho('You can enable tests by entering following command:')
		click.secho('bench --site {0} set-config allow_tests true'.format(site), fg='green')
		return

	frappe.init(site=site)

	frappe.flags.skip_before_tests = skip_before_tests
	frappe.flags.skip_test_records = skip_test_records

	if coverage:
		from coverage import Coverage
		from frappe.coverage import STANDARD_INCLUSIONS, STANDARD_EXCLUSIONS, FRAPPE_EXCLUSIONS

		# Generate coverage report only for app that is being tested
		source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
		omit = STANDARD_EXCLUSIONS[:]

		if not app or app == 'frappe':
			omit.extend(FRAPPE_EXCLUSIONS)

		cov = Coverage(source=[source_path], omit=omit, include=STANDARD_INCLUSIONS)
		cov.start()

	ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
		force=context.force, profile=profile, junit_xml_output=junit_xml_output,
		ui_tests=ui_tests, doctype_list_path=doctype_list_path, failfast=failfast)

	if coverage:
		cov.stop()
		cov.save()

	# exit code 0 only when the whole run passed
	if len(ret.failures) == 0 and len(ret.errors) == 0:
		ret = 0

	if os.environ.get('CI'):
		sys.exit(ret)
@click.command('run-parallel-tests')
@click.option('--app', help="For App", default='frappe')
@click.option('--build-number', help="Build number", default=1)
@click.option('--total-builds', help="Total number of builds", default=1)
@click.option('--with-coverage', is_flag=True, help="Build coverage file")
@click.option('--use-orchestrator', is_flag=True, help="Use orchestrator to run parallel tests")
@pass_context
def run_parallel_tests(context, app, build_number, total_builds, with_coverage=False, use_orchestrator=False):
	# Split the app's test suite across parallel builds; the orchestrator
	# variant coordinates the split externally.
	site = get_site(context)
	if use_orchestrator:
		from frappe.parallel_test_runner import ParallelTestWithOrchestrator
		ParallelTestWithOrchestrator(app, site=site, with_coverage=with_coverage)
		return
	from frappe.parallel_test_runner import ParallelTestRunner
	ParallelTestRunner(app, site=site, build_number=build_number, total_builds=total_builds, with_coverage=with_coverage)
@click.command('run-ui-tests')
@click.argument('app')
@click.option('--headless', is_flag=True, help="Run UI Test in headless mode")
@click.option('--parallel', is_flag=True, help="Run UI Test in parallel mode")
@click.option('--ci-build-id')
@pass_context
def run_ui_tests(context, app, headless=False, parallel=True, ci_build_id=None):
	"Run UI tests"
	site = get_site(context)
	app_base_path = os.path.abspath(os.path.join(frappe.get_app_path(app), '..'))
	site_url = frappe.utils.get_site_url(site)
	admin_password = frappe.get_conf(site).admin_password

	# override baseUrl using env variable
	site_env = f'CYPRESS_baseUrl={site_url}'
	password_env = f'CYPRESS_adminPassword={admin_password}' if admin_password else ''

	os.chdir(app_base_path)

	node_bin = subprocess.getoutput("npm bin")
	cypress_path = f"{node_bin}/cypress"
	plugin_path = f"{node_bin}/../cypress-file-upload"
	testing_library_path = f"{node_bin}/../@testing-library"

	# check if cypress in path...if not, install it.
	if not (
		os.path.exists(cypress_path)
		and os.path.exists(plugin_path)
		and os.path.exists(testing_library_path)
		and cint(subprocess.getoutput("npm view cypress version")[:1]) >= 6
	):
		# install cypress
		click.secho("Installing Cypress...", fg="yellow")
		frappe.commands.popen("yarn add cypress@^6 cypress-file-upload@^5 @testing-library/cypress@^8 --no-lockfile")

	# run for headless mode
	run_or_open = 'run --browser chrome --record' if headless else 'open'
	command = '{site_env} {password_env} {cypress} {run_or_open}'
	formatted_command = command.format(site_env=site_env, password_env=password_env, cypress=cypress_path, run_or_open=run_or_open)

	if parallel:
		formatted_command += ' --parallel'

	if ci_build_id:
		formatted_command += f' --ci-build-id {ci_build_id}'

	click.secho("Running Cypress...", fg="yellow")
	frappe.commands.popen(formatted_command, cwd=app_base_path, raise_err=True)
@click.command('serve')
@click.option('--port', default=8000)
@click.option('--profile', is_flag=True, default=False)
@click.option('--noreload', "no_reload", is_flag=True, default=False)
@click.option('--nothreading', "no_threading", is_flag=True, default=False)
@pass_context
def serve(context, port=None, profile=False, no_reload=False, no_threading=False, sites_path='.', site=None):
	"Start development web server"
	import frappe.app
	# serve the first selected site, or all sites when none is given
	site = context.sites[0] if context.sites else None
	frappe.app.serve(port=port, profile=profile, no_reload=no_reload, no_threading=no_threading, site=site, sites_path='.')
@click.command('request')
@click.option('--args', help='arguments like `?cmd=test&key=value` or `/api/request/method?..`')
@click.option('--path', help='path to request JSON')
@pass_context
def request(context, args=None, path=None):
	"Run a request as an admin"
	import frappe.handler
	import frappe.api
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			if args:
				# parse the query string into form_dict; for /api/method
				# URLs the command name is the last path segment
				if "?" in args:
					frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")])
				else:
					frappe.local.form_dict = frappe._dict()
				if args.startswith("/api/method"):
					frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1]
			elif path:
				# load the whole form_dict from a JSON file
				with open(os.path.join('..', path), 'r') as f:
					args = json.loads(f.read())
				frappe.local.form_dict = frappe._dict(args)
			frappe.handler.execute_cmd(frappe.form_dict.cmd)
			print(frappe.response)
		finally:
			frappe.destroy()
	if not context.sites:
		raise SiteNotSpecifiedError
@click.command('make-app')
@click.argument('destination')
@click.argument('app_name')
def make_app(destination, app_name):
	"Creates a boilerplate app"
	from frappe.utils.boilerplate import make_boilerplate
	# scaffolds a new app named `app_name` under `destination`
	make_boilerplate(destination, app_name)
@click.command('set-config')
@click.argument('key')
@click.argument('value')
@click.option('-g', '--global', 'global_', is_flag=True, default=False, help='Set value in bench config')
@click.option('-p', '--parse', is_flag=True, default=False, help='Evaluate as Python Object')
@click.option('--as-dict', is_flag=True, default=False, help='Legacy: Evaluate as Python Object')
@pass_context
def set_config(context, key, value, global_=False, parse=False, as_dict=False):
	"Insert/Update a value in site_config.json"
	from frappe.installer import update_site_config

	# --as-dict is the legacy spelling of --parse
	if as_dict:
		from frappe.utils.commands import warn
		warn("--as-dict will be deprecated in v14. Use --parse instead", category=PendingDeprecationWarning)
		parse = as_dict

	# interpret the value as a Python literal instead of a plain string
	if parse:
		import ast
		value = ast.literal_eval(value)

	if global_:
		# write to the bench-wide common_site_config.json
		sites_path = os.getcwd()
		common_site_config_path = os.path.join(sites_path, 'common_site_config.json')
		update_site_config(key, value, validate=False, site_config_path=common_site_config_path)
	else:
		for site in context.sites:
			frappe.init(site=site)
			update_site_config(key, value, validate=False)
			frappe.destroy()
@click.command("version")
@click.option("-f", "--format", "output",
	type=click.Choice(["plain", "table", "json", "legacy"]), help="Output format", default="legacy")
def get_version(output):
	"""Show the versions of all the installed apps."""
	from git import Repo
	from frappe.utils.commands import render_table
	from frappe.utils.change_log import get_app_branch

	frappe.init("")

	# Collect one record per installed app: name, branch, commit and version.
	data = []
	for app in sorted(frappe.get_all_apps()):
		module = frappe.get_module(app)
		app_hooks = frappe.get_module(app + ".hooks")
		repo = Repo(frappe.get_app_path(app, ".."))

		app_info = frappe._dict()
		app_info.app = app
		app_info.branch = get_app_branch(app)
		app_info.commit = repo.head.object.hexsha[:7]
		app_info.version = getattr(app_hooks, f"{app_info.branch}_version", None) or module.__version__
		data.append(app_info)

	# Render according to the requested format (click.Choice guarantees
	# `output` is one of the four options).
	if output == "legacy":
		for app_info in data:
			click.echo(f"{app_info.app} {app_info.version}")
	elif output == "plain":
		for app_info in data:
			click.echo(f"{app_info.app} {app_info.version} {app_info.branch} ({app_info.commit})")
	elif output == "table":
		render_table(
			[["App", "Version", "Branch", "Commit"]]
			+ [[app_info.app, app_info.version, app_info.branch, app_info.commit] for app_info in data]
		)
	elif output == "json":
		click.echo(json.dumps(data, indent=4))
@click.command('rebuild-global-search')
@click.option('--static-pages', is_flag=True, default=False, help='Rebuild global search for static pages')
@pass_context
def rebuild_global_search(context, static_pages=False):
	'''Setup help table in the current site (called after migrate)'''
	from frappe.utils.global_search import (get_doctypes_with_global_search, rebuild_for_doctype,
		get_routes_to_index, add_route_to_global_search, sync_global_search)

	for site in context.sites:
		try:
			frappe.init(site)
			frappe.connect()

			if static_pages:
				# index website routes instead of doctype records
				routes = get_routes_to_index()
				for i, route in enumerate(routes):
					add_route_to_global_search(route)
					# rendering a route may leave a stale request object behind
					frappe.local.request = None
					update_progress_bar('Rebuilding Global Search', i, len(routes))
				sync_global_search()
			else:
				doctypes = get_doctypes_with_global_search()
				for i, doctype in enumerate(doctypes):
					rebuild_for_doctype(doctype)
					update_progress_bar('Rebuilding Global Search', i, len(doctypes))

		finally:
			frappe.destroy()
	if not context.sites:
		raise SiteNotSpecifiedError
# Click commands exported by this module; the CLI collects them from here.
commands = [
    build,
    clear_cache,
    clear_website_cache,
    jupyter,
    console,
    destroy_all_sessions,
    execute,
    export_csv,
    export_doc,
    export_fixtures,
    export_json,
    get_version,
    import_csv,
    data_import,
    import_doc,
    make_app,
    mariadb,
    postgres,
    request,
    reset_perms,
    run_tests,
    run_ui_tests,
    serve,
    set_config,
    show_config,
    watch,
    bulk_rename,
    add_to_email_queue,
    rebuild_global_search,
    run_parallel_tests
]
|
#!/usr/bin/python
import unittest
from main import compute_precedence, compute_serial
class Test18(unittest.TestCase):
    """Tests for the two expression evaluators in `main`.

    compute_serial evaluates + and * strictly left-to-right (parentheses
    still bind); compute_precedence gives + higher precedence than *.
    """

    def assertComputeSerial(self, line, result):
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12), so use the canonical name.
        self.assertEqual(compute_serial(line), result)

    def test_compute_serial_simple(self):
        self.assertComputeSerial('2', 2)
        self.assertComputeSerial(' 12 ', 12)
        self.assertComputeSerial('3 * 5', 15)
        self.assertComputeSerial('300 + 14', 314)
        self.assertComputeSerial('30 + 1 * 2', 62)

    def test_compute_serial_examples(self):
        self.assertComputeSerial('1 + 2 * 3 + 4 * 5 + 6', 71)
        self.assertComputeSerial('1 + (2 * 3) + (4 * (5 + 6))', 51)
        self.assertComputeSerial('2 * 3 + (4 * 5)', 26)
        self.assertComputeSerial('5 + (8 * 3 + 9 + 3 * 4 * 3)', 437)
        self.assertComputeSerial('5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))', 12240)
        self.assertComputeSerial('((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2', 13632)

    def assertComputePrecedence(self, line, result):
        # Same deprecation fix as assertComputeSerial above.
        self.assertEqual(compute_precedence(line), result)

    def test_compute_precedence_simple(self):
        self.assertComputePrecedence('2', 2)
        self.assertComputePrecedence(' 12 ', 12)
        self.assertComputePrecedence('3 * 5', 15)
        self.assertComputePrecedence('300 + 14', 314)
        self.assertComputePrecedence('30 + 1 * 2', 62)
        self.assertComputePrecedence('30 * 1 + 2', 90)

    def test_compute_precedence_examples(self):
        self.assertComputePrecedence('1 + 2 * 3 + 4 * 5 + 6', 231)
        self.assertComputePrecedence('1 + (2 * 3) + (4 * (5 + 6))', 51)
        self.assertComputePrecedence('2 * 3 + (4 * 5)', 46)
        self.assertComputePrecedence('5 + (8 * 3 + 9 + 3 * 4 * 3)', 1445)
        self.assertComputePrecedence('5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))', 669060)
        self.assertComputePrecedence('((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2', 23340)


if __name__ == "__main__":
    unittest.main()
|
# check working directory
#import os
#WORKINGDIR = os.path.normpath(os.getcwd())
#print("Current Working directory:\t{}".format(WORKINGDIR))
#folders = WORKINGDIR.split(os.sep)
#if folders.pop() in ['notebook', 'src', 'talks']:
# WORKINGDIR = os.sep.join(folders)
# print("Changed to New working directory:\t{dir}".format(dir=WORKINGDIR))
# os.chdir(WORKINGDIR)
|
import json
import os
def arquivo_json_existe(file_name):
    """Return True when a file exists at *file_name*, False otherwise."""
    existe = os.path.exists(file_name)
    return existe
def busca_registro_no_arquivo(arquivo, registro):
    """Find *registro* in the list *arquivo* by its 'id' field.

    Returns the integer index of the matching record, or False when no
    record shares registro['id'].
    """
    alvo = registro['id']
    for posicao, item in enumerate(arquivo):
        if item['id'] == alvo:
            return posicao
    return False
def ler_arquivo_json(nome_arquivo):
    """Load and return the JSON content of *nome_arquivo* (UTF-8)."""
    with open(nome_arquivo, 'r', encoding='utf8') as arquivo:
        conteudo = json.load(arquivo)
    return conteudo
def gravar_arquivo_json(nome_arquivo, dados):
    """Write *dados* to *nome_arquivo* as pretty-printed UTF-8 JSON."""
    with open(nome_arquivo, 'w', encoding='utf-8') as saida:
        json.dump(
            dados,
            saida,
            ensure_ascii=False,
            indent=2,
            sort_keys=False,
            separators=(',', ':'),
        )
def inclui_arquivo_na_base_dados(arquivo):
    """Merge the records of *arquivo* into the JSON database file.

    For each incoming record: if the database already holds a record with
    the same 'id' it is replaced, otherwise the record is appended. When
    the database file does not exist yet, it is created from *arquivo*.

    Returns 0 on completion (kept for backward compatibility).
    """
    global nome_base_dados
    arquivo_json = {}
    if arquivo_json_existe(nome_base_dados):  # database already exists?
        base_dados_json = ler_arquivo_json(nome_base_dados)
        base_dados = base_dados_json['items']
        # Iterate the incoming records directly instead of by index
        # (range(len(...)) is un-Pythonic and adds nothing here).
        for registro in arquivo:
            resultado = busca_registro_no_arquivo(base_dados, registro)
            # busca_registro_no_arquivo returns an int index on a match and
            # False otherwise; the exact type check keeps index 0 (falsy)
            # from being mistaken for "not found".
            if type(resultado) is int:
                base_dados[resultado] = registro
            else:
                base_dados.append(registro)
        arquivo_json['items'] = base_dados
    else:
        arquivo_json['items'] = arquivo
    gravar_arquivo_json(nome_base_dados, arquivo_json)
    return 0
#================================================================================#
#                                     MAIN                                       #
#================================================================================#
# Name of the JSON database file that records are merged into.
nome_base_dados = "base_dados_repositorios.json"

# Ask the user which .json file should be merged into the database.
print("Informe o nome do arquivo.json que deseja incluir na base de dados: ")
nome_arquivo = input()

if arquivo_json_existe(nome_arquivo):
    arquivo = ler_arquivo_json(nome_arquivo)
    inclui_arquivo_na_base_dados(arquivo['items'])
else:
    print(f'Erro - Arquivo {str(nome_arquivo)} não foi localizados!')
|
# -*- coding: utf-8 -*-
'''
Demo driver for the local KNN module: classifies two sample points
with a k-nearest-neighbours classifier. (Python 2 syntax.)

Created on 2014-09-30
@author: Rayleigh
'''
import KNN as kNN
from numpy import *

# Toy training set shipped with the KNN module.
dataSet, labels = kNN.createDataSet()

testX = array([0.2, 0.9])
k = 3
# NOTE(review): `k` is defined but the literal 3 is passed below —
# presumably they are meant to be the same value; confirm before reusing k.
outputLabel = kNN.kNNClassify(testX, dataSet, labels, 3)
print "Your input is:", testX, "and classified to class: ", outputLabel

testX = array([0.1, 0.3])
outputLabel = kNN.kNNClassify(testX, dataSet, labels, 3)
print "Your input is:", testX, "and classified to class: ", outputLabel
|
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import url_for
from socialShrink.user.models import User
from .factories import UserFactory
class TestLoggingIn:
    """Login.

    Functional tests driving the navbar login form through WebTest.
    Relies on the `user`/`testapp` pytest fixtures; the fixture user's
    password is 'myprecious'.
    """

    def test_can_log_in_returns_200(self, user, testapp):
        """Login successful."""
        # Goes to homepage
        res = testapp.get('/')
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'myprecious'
        # Submits and follows the redirect to the landing page
        res = form.submit().follow()
        assert res.status_code == 200

    def test_sees_alert_on_log_out(self, user, testapp):
        """Show alert on logout."""
        res = testapp.get('/')
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'myprecious'
        # Submits
        res = form.submit().follow()
        res = testapp.get(url_for('public.logout')).follow()
        # sees alert
        assert 'You are logged out.' in res

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        """Show error if password is incorrect."""
        # Goes to homepage
        res = testapp.get('/')
        # Fills out login form, password incorrect
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'wrong'
        # Submits (no redirect: the form re-renders with the error)
        res = form.submit()
        # sees error
        assert 'Invalid password' in res

    def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
        """Show error if username doesn't exist."""
        # Goes to homepage
        res = testapp.get('/')
        # Fills out login form with an unknown username
        form = res.forms['loginForm']
        form['username'] = 'unknown'
        form['password'] = 'myprecious'
        # Submits
        res = form.submit()
        # sees error
        assert 'Unknown user' in res
class TestRegistering:
    """Register a user.

    Functional tests driving the registration form through WebTest.
    """

    def test_can_register(self, user, testapp):
        """Register a new user."""
        old_count = len(User.query.all())
        # Goes to homepage
        res = testapp.get('/')
        # Clicks Create Account button
        res = res.click('Create account')
        # Fills out the form
        form = res.forms['registerForm']
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits and follows the redirect
        res = form.submit().follow()
        assert res.status_code == 200
        # A new user was created
        assert len(User.query.all()) == old_count + 1

    def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
        """Show error if passwords don't match."""
        # Goes to registration page
        res = testapp.get(url_for('public.register'))
        # Fills out form, but passwords don't match
        form = res.forms['registerForm']
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secrets'
        # Submits (form re-renders with the validation error)
        res = form.submit()
        # sees error message
        assert 'Passwords must match' in res

    def test_sees_error_message_if_user_already_registered(self, user, testapp):
        """Show error if user already registered."""
        user = UserFactory(active=True)  # A registered user
        user.save()
        # Goes to registration page
        res = testapp.get(url_for('public.register'))
        # Fills out form, but username is already registered
        form = res.forms['registerForm']
        form['username'] = user.username
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit()
        # sees error
        assert 'Username already registered' in res
|
# (C) Datadog, Inc. 2016-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from google.protobuf.internal.decoder import _DecodeVarint32 # pylint: disable=E0611,E0401
from . import metrics_pb2
# Deprecated, please use the PrometheusCheck class
def parse_metric_family(buf):
    """
    Parse the binary buffer in input, searching for Prometheus messages
    of type MetricFamily [0] delimited by a varint32 [1], and yield each
    decoded MetricFamily message.

    [0] https://github.com/prometheus/client_model/blob/086fe7ca28bde6cec2acd5223423c1475a362858/metrics.proto#L76-%20%20L81 # noqa: E501
    [1] https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractMessageLite#writeDelimitedTo(java.io.OutputStream) # noqa: E501
    """
    pos = 0
    end = len(buf)
    while pos < end:
        # Each message is preceded by its byte length as a varint32.
        msg_len, pos = _DecodeVarint32(buf, pos)
        message = metrics_pb2.MetricFamily()
        message.ParseFromString(buf[pos : pos + msg_len])
        pos += msg_len
        yield message
|
"""CrawlSpider v2"""
from .rules import Rule
from .spider import CrawlSpider
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcassandra.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
    """RPC request for the Cassandra `CreateCluster` API (version 2019-01-01).

    SDK-generated style: each cluster attribute is exposed as a get_/set_
    pair that reads from or writes to the request's query parameters.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Cassandra', '2019-01-01', 'CreateCluster','Cassandra')
        self.set_method('POST')
        # Attach endpoint resolution data when the core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ClientToken(self):
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self,ClientToken):
        self.add_query_param('ClientToken',ClientToken)

    def get_ClusterName(self):
        return self.get_query_params().get('ClusterName')

    def set_ClusterName(self,ClusterName):
        self.add_query_param('ClusterName',ClusterName)

    def get_Password(self):
        return self.get_query_params().get('Password')

    def set_Password(self,Password):
        self.add_query_param('Password',Password)

    def get_InstanceType(self):
        return self.get_query_params().get('InstanceType')

    def set_InstanceType(self,InstanceType):
        self.add_query_param('InstanceType',InstanceType)

    def get_AutoRenewPeriod(self):
        return self.get_query_params().get('AutoRenewPeriod')

    def set_AutoRenewPeriod(self,AutoRenewPeriod):
        self.add_query_param('AutoRenewPeriod',AutoRenewPeriod)

    def get_Period(self):
        return self.get_query_params().get('Period')

    def set_Period(self,Period):
        self.add_query_param('Period',Period)

    def get_MajorVersion(self):
        return self.get_query_params().get('MajorVersion')

    def set_MajorVersion(self,MajorVersion):
        self.add_query_param('MajorVersion',MajorVersion)

    def get_DiskSize(self):
        return self.get_query_params().get('DiskSize')

    def set_DiskSize(self,DiskSize):
        self.add_query_param('DiskSize',DiskSize)

    def get_DiskType(self):
        return self.get_query_params().get('DiskType')

    def set_DiskType(self,DiskType):
        self.add_query_param('DiskType',DiskType)

    def get_VswitchId(self):
        return self.get_query_params().get('VswitchId')

    def set_VswitchId(self,VswitchId):
        self.add_query_param('VswitchId',VswitchId)

    def get_PeriodUnit(self):
        return self.get_query_params().get('PeriodUnit')

    def set_PeriodUnit(self,PeriodUnit):
        self.add_query_param('PeriodUnit',PeriodUnit)

    def get_AutoRenew(self):
        return self.get_query_params().get('AutoRenew')

    def set_AutoRenew(self,AutoRenew):
        self.add_query_param('AutoRenew',AutoRenew)

    def get_DataCenterName(self):
        return self.get_query_params().get('DataCenterName')

    def set_DataCenterName(self,DataCenterName):
        self.add_query_param('DataCenterName',DataCenterName)

    def get_NodeCount(self):
        return self.get_query_params().get('NodeCount')

    def set_NodeCount(self,NodeCount):
        self.add_query_param('NodeCount',NodeCount)

    def get_VpcId(self):
        return self.get_query_params().get('VpcId')

    def set_VpcId(self,VpcId):
        self.add_query_param('VpcId',VpcId)

    def get_ZoneId(self):
        return self.get_query_params().get('ZoneId')

    def set_ZoneId(self,ZoneId):
        self.add_query_param('ZoneId',ZoneId)

    def get_PayType(self):
        return self.get_query_params().get('PayType')

    def set_PayType(self,PayType):
        self.add_query_param('PayType',PayType)
|
# modified from /Users/janet/Dropbox/meta4_bins_data_and_files/170118_read_mappings_by_sample/plot_frac_mapped.py
# coding: utf-8
# Plots, per oxygen condition and replicate, the (upper-bound) fraction of
# reads that mapped to contigs across weeks.
print('import packages...')
import matplotlib as mpl
mpl.use('Agg')  # headless backend: render to file, no display needed
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
print('done importing packages...')

info = pd.read_csv('./data/total_num_reads_across_samples_with_sample_info.tsv',
                   sep='\t')

sample_info = info[['sample id', 'week', 'oxygen', 'replicate']].drop_duplicates()
sample_info.shape

info.head()

df = info.copy()
df['oxygen, rep'] = df['oxygen'].map(str) + ' oxygen, rep ' + df['replicate'].map(str)

df.head()

# Upper bound: multiply-mapped reads are counted once per mapping.
df['frac mapped to contigs (upper-bound)'] = df['reads mapped (includes multiply mapped)']/df['number of reads']

print('plotting time')
fig, axs = plt.subplots(2, 1, figsize=(10, 4))
print('fig, axs initialized')
axs_dict = {'low':axs[0], 'high':axs[1]}
axs[0].set_title('low oxygen')
axs[1].set_title('high oxygen')
color_dict = {1:'#bdc9e1', 2:'#74a9cf', 3:'#2b8cbe', 4:'#045a8d'}
print('loop through groupby dataframes')
for (o2, rep), sdf in df.groupby(['oxygen', 'replicate']):
    ax = axs_dict[o2]
    # BUG FIX: sort_values returns a new DataFrame (it is not in-place by
    # default); the original discarded the result, so lines could be drawn
    # with points in arbitrary week order.
    sdf = sdf.sort_values('week', ascending=True)
    color = color_dict[rep]
    ax.plot(sdf['week'], sdf['frac mapped to contigs (upper-bound)'],
            linestyle='-', marker='o', color=color)

labels = ['rep {}'.format(n) for n in [1, 2, 3, 4]]
axs[0].legend(labels, bbox_to_anchor=(1.15, 1.05))
lgd = axs[1].legend(labels, bbox_to_anchor=(1.15, 1.05))
plt.gcf().suptitle('170118 upper bound frac of reads aligned to contigs', size=15)
fig.subplots_adjust(top=0.85)
print('save fig')
plt.savefig('170118_approx_frac_reads_mapping_to_contigs.pdf',
            bbox_extra_artists=(lgd,), bbox_inches='tight')
|
"""Provide functionality to interact with Cast devices on the network."""
import asyncio
import logging
from typing import Optional
import pychromecast
from pychromecast.controllers.homeassistant import HomeAssistantController
from pychromecast.controllers.multizone import MultizoneManager
from pychromecast.socket_client import (
CONNECTION_STATUS_CONNECTED,
CONNECTION_STATUS_DISCONNECTED,
)
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from homeassistant.util.logging import async_create_catching_coro
from .const import (
ADDED_CAST_DEVICES_KEY,
CAST_MULTIZONE_MANAGER_KEY,
DEFAULT_PORT,
DOMAIN as CAST_DOMAIN,
KNOWN_CHROMECAST_INFO_KEY,
SIGNAL_CAST_DISCOVERED,
SIGNAL_CAST_REMOVED,
SIGNAL_HASS_CAST_SHOW_VIEW,
)
from .discovery import discover_chromecast, setup_internal_discovery
from .helpers import (
CastStatusListener,
ChromecastInfo,
ChromeCastZeroconf,
DynamicGroupCastStatusListener,
)
_LOGGER = logging.getLogger(__name__)
CONF_IGNORE_CEC = "ignore_cec"
CAST_SPLASH = "https://home-assistant.io/images/cast/splash.png"
SUPPORT_CAST = (
SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_IGNORE_CEC, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
@callback
def _async_create_cast_device(hass: HomeAssistantType, info: ChromecastInfo):
    """Create a CastDevice Entity from the chromecast object.

    Returns None if the cast device has already been added.
    """
    _LOGGER.debug("_async_create_cast_device: %s", info)

    if info.uuid is None:
        # Casts without a UUID cannot be tracked, so they are never stored;
        # hand back a fresh entity every time.
        return CastDevice(info)

    if info.is_dynamic_group:
        # Dynamic groups are never added as their own entity.
        return None

    known_uuids = hass.data[ADDED_CAST_DEVICES_KEY]
    if info.uuid in known_uuids:
        # Entity already exists; it tracks host moves on its own.
        return None

    # First time this UUID is seen -> remember it and create the entity.
    known_uuids.add(info.uuid)
    return CastDevice(info)
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up the Cast platform.

    Deprecated: kept only for YAML platform configuration; warns and
    delegates to _async_setup_platform.
    """
    _LOGGER.warning(
        "Setting configuration for Cast via platform is deprecated. "
        "Configure via Cast integration instead."
    )
    await _async_setup_platform(hass, config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Cast from a config entry.

    Runs one platform setup per configured media_player entry; raises
    PlatformNotReady (so Home Assistant retries later) if any failed.
    """
    config = hass.data[CAST_DOMAIN].get("media_player", {})
    if not isinstance(config, list):
        config = [config]

    # no pending task
    done, _ = await asyncio.wait(
        [_async_setup_platform(hass, cfg, async_add_entities, None) for cfg in config]
    )

    # Collect the failures once (the original computed the exception list
    # twice and logged `exc_info=None` entries for the successful tasks).
    exceptions = [exc for exc in (task.exception() for task in done) if exc is not None]
    if exceptions:
        for exception in exceptions:
            _LOGGER.debug("Failed to setup chromecast", exc_info=exception)
        raise PlatformNotReady
async def _async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info
):
    """Set up the cast platform.

    Subscribes to cast discovery, replays already-known casts, and either
    starts internal discovery or probes the single requested device.
    """
    # Import CEC IGNORE attributes
    pychromecast.IGNORE_CEC += config.get(CONF_IGNORE_CEC, [])
    hass.data.setdefault(ADDED_CAST_DEVICES_KEY, set())
    hass.data.setdefault(KNOWN_CHROMECAST_INFO_KEY, set())

    # `info` is non-None only when a specific device was requested
    # (via discovery payload or an explicit `host` in the config).
    info = None
    if discovery_info is not None:
        info = ChromecastInfo(host=discovery_info["host"], port=discovery_info["port"])
    elif CONF_HOST in config:
        info = ChromecastInfo(host=config[CONF_HOST], port=DEFAULT_PORT)

    @callback
    def async_cast_discovered(discover: ChromecastInfo) -> None:
        """Handle discovery of a new chromecast."""
        if info is not None and info.host_port != discover.host_port:
            # Not our requested cast device.
            return

        cast_device = _async_create_cast_device(hass, discover)
        if cast_device is not None:
            async_add_entities([cast_device])

    async_dispatcher_connect(hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered)
    # Re-play the callback for all past chromecasts, store the objects in
    # a list to avoid concurrent modification resulting in exception.
    for chromecast in list(hass.data[KNOWN_CHROMECAST_INFO_KEY]):
        async_cast_discovered(chromecast)

    if info is None or info.is_audio_group:
        # If we were a) explicitly told to enable discovery or
        # b) have an audio group cast device, we need internal discovery.
        hass.async_add_executor_job(setup_internal_discovery, hass)
    else:
        # Single requested device: fill in details and announce it.
        info = await hass.async_add_executor_job(info.fill_out_missing_chromecast_info)
        if info.friendly_name is None:
            _LOGGER.debug(
                "Cannot retrieve detail information for chromecast"
                " %s, the device may not be online",
                info,
            )

        hass.async_add_executor_job(discover_chromecast, hass, info)
class CastDevice(MediaPlayerDevice):
"""Representation of a Cast device on the network.
This class is the holder of the pychromecast.Chromecast object and its
socket client. It therefore handles all reconnects and audio group changing
"elected leader" itself.
"""
    def __init__(self, cast_info: ChromecastInfo):
        """Initialize the cast device."""
        self._cast_info = cast_info
        # Known zeroconf services for this cast; None when connecting by host.
        self.services = None
        if cast_info.service:
            self.services = set()
            self.services.add(cast_info.service)
        self._chromecast: Optional[pychromecast.Chromecast] = None
        self.cast_status = None
        self.media_status = None
        self.media_status_received = None
        # State for an associated dynamic (ad-hoc) speaker group, if any.
        self._dynamic_group_cast_info: ChromecastInfo = None
        self._dynamic_group_cast: Optional[pychromecast.Chromecast] = None
        self.dynamic_group_media_status = None
        self.dynamic_group_media_status_received = None
        # Media status per audio group this cast is a member of, keyed by
        # group UUID.
        self.mz_media_status = {}
        self.mz_media_status_received = {}
        self.mz_mgr = None
        self._available = False
        self._dynamic_group_available = False
        self._status_listener: Optional[CastStatusListener] = None
        self._dynamic_group_status_listener: Optional[
            DynamicGroupCastStatusListener
        ] = None
        self._hass_cast_controller: Optional[HomeAssistantController] = None
        # Dispatcher unsubscribe callbacks, set in async_added_to_hass.
        self._add_remove_handler = None
        self._del_remove_handler = None
        self._cast_view_remove_handler = None
    async def async_added_to_hass(self):
        """Create chromecast object when added to hass."""
        # Follow future discoveries/removals of this device.
        self._add_remove_handler = async_dispatcher_connect(
            self.hass, SIGNAL_CAST_DISCOVERED, self._async_cast_discovered
        )
        self._del_remove_handler = async_dispatcher_connect(
            self.hass, SIGNAL_CAST_REMOVED, self._async_cast_removed
        )
        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_stop)
        # Connect to the device in the background.
        self.hass.async_create_task(
            async_create_catching_coro(self.async_set_cast_info(self._cast_info))
        )
        # Attach the first already-known dynamic group this cast belongs to.
        for info in self.hass.data[KNOWN_CHROMECAST_INFO_KEY]:
            if self._cast_info.same_dynamic_group(info):
                _LOGGER.debug(
                    "[%s %s (%s:%s)] Found dynamic group: %s",
                    self.entity_id,
                    self._cast_info.friendly_name,
                    self._cast_info.host,
                    self._cast_info.port,
                    info,
                )
                self.hass.async_create_task(
                    async_create_catching_coro(self.async_set_dynamic_group(info))
                )
                break

        self._cast_view_remove_handler = async_dispatcher_connect(
            self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signal_show_view
        )
async def async_will_remove_from_hass(self) -> None:
"""Disconnect Chromecast object when removed."""
await self._async_disconnect()
if self._cast_info.uuid is not None:
# Remove the entity from the added casts so that it can dynamically
# be re-added again.
self.hass.data[ADDED_CAST_DEVICES_KEY].remove(self._cast_info.uuid)
if self._add_remove_handler:
self._add_remove_handler()
self._add_remove_handler = None
if self._del_remove_handler:
self._del_remove_handler()
self._del_remove_handler = None
if self._cast_view_remove_handler:
self._cast_view_remove_handler()
self._cast_view_remove_handler = None
    async def async_set_cast_info(self, cast_info):
        """Set the cast information and set up the chromecast object."""
        self._cast_info = cast_info

        # Track any newly seen zeroconf service for this device.
        if self.services is not None:
            if cast_info.service not in self.services:
                _LOGGER.debug(
                    "[%s %s (%s:%s)] Got new service: %s (%s)",
                    self.entity_id,
                    self._cast_info.friendly_name,
                    self._cast_info.host,
                    self._cast_info.port,
                    cast_info.service,
                    self.services,
                )

            self.services.add(cast_info.service)

        if self._chromecast is not None:
            # Only setup the chromecast once, added elements to services
            # will automatically be picked up.
            return

        # pylint: disable=protected-access
        if self.services is None:
            # No zeroconf services known: connect directly by host/port.
            _LOGGER.debug(
                "[%s %s (%s:%s)] Connecting to cast device by host %s",
                self.entity_id,
                self._cast_info.friendly_name,
                self._cast_info.host,
                self._cast_info.port,
                cast_info,
            )
            chromecast = await self.hass.async_add_job(
                pychromecast._get_chromecast_from_host,
                (
                    cast_info.host,
                    cast_info.port,
                    cast_info.uuid,
                    cast_info.model_name,
                    cast_info.friendly_name,
                ),
            )
        else:
            # Connect via the known zeroconf services.
            _LOGGER.debug(
                "[%s %s (%s:%s)] Connecting to cast device by service %s",
                self.entity_id,
                self._cast_info.friendly_name,
                self._cast_info.host,
                self._cast_info.port,
                self.services,
            )
            chromecast = await self.hass.async_add_job(
                pychromecast._get_chromecast_from_service,
                (
                    self.services,
                    ChromeCastZeroconf.get_zeroconf(),
                    cast_info.uuid,
                    cast_info.model_name,
                    cast_info.friendly_name,
                ),
            )
        self._chromecast = chromecast

        if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
            self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
        self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]

        self._status_listener = CastStatusListener(self, chromecast, self.mz_mgr)
        self._available = False
        self.cast_status = chromecast.status
        self.media_status = chromecast.media_controller.status
        # Starts the socket client's worker thread; status callbacks follow.
        self._chromecast.start()
        self.async_schedule_update_ha_state()
    async def async_del_cast_info(self, cast_info):
        """Remove the service."""
        # Drop the vanished zeroconf service; the connection itself is kept.
        self.services.discard(cast_info.service)
        _LOGGER.debug(
            "[%s %s (%s:%s)] Remove service: %s (%s)",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
            cast_info.service,
            self.services,
        )
    async def async_set_dynamic_group(self, cast_info):
        """Connect to the dynamic group described by *cast_info*.

        Replaces any previously attached dynamic group and sets up a
        second chromecast connection plus its status listener.
        """
        _LOGGER.debug(
            "[%s %s (%s:%s)] Connecting to dynamic group by host %s",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
            cast_info,
        )

        # Detach any previous dynamic group first.
        await self.async_del_dynamic_group()
        self._dynamic_group_cast_info = cast_info

        # pylint: disable=protected-access
        chromecast = await self.hass.async_add_executor_job(
            pychromecast._get_chromecast_from_host,
            (
                cast_info.host,
                cast_info.port,
                cast_info.uuid,
                cast_info.model_name,
                cast_info.friendly_name,
            ),
        )

        self._dynamic_group_cast = chromecast

        if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
            self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
        mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]

        self._dynamic_group_status_listener = DynamicGroupCastStatusListener(
            self, chromecast, mz_mgr
        )
        self._dynamic_group_available = False
        self.dynamic_group_media_status = chromecast.media_controller.status
        # Starts the group's socket client worker thread.
        self._dynamic_group_cast.start()
        self.async_schedule_update_ha_state()
    async def async_del_dynamic_group(self):
        """Remove the dynamic group."""
        cast_info = self._dynamic_group_cast_info
        _LOGGER.debug(
            "[%s %s (%s:%s)] Remove dynamic group: %s",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
            cast_info.service if cast_info else None,
        )

        self._dynamic_group_available = False
        self._dynamic_group_cast_info = None
        if self._dynamic_group_cast is not None:
            # Disconnect in the executor: the socket client blocks.
            await self.hass.async_add_executor_job(self._dynamic_group_cast.disconnect)

        self._dynamic_group_invalidate()

        self.async_schedule_update_ha_state()
    async def _async_disconnect(self):
        """Disconnect Chromecast object if it is set."""
        if self._chromecast is None:
            # Can't disconnect if not connected.
            return
        _LOGGER.debug(
            "[%s %s (%s:%s)] Disconnecting from chromecast socket.",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
        )
        # Mark unavailable before the (blocking) disconnect completes.
        self._available = False
        self.async_schedule_update_ha_state()

        await self.hass.async_add_executor_job(self._chromecast.disconnect)
        if self._dynamic_group_cast is not None:
            await self.hass.async_add_executor_job(self._dynamic_group_cast.disconnect)

        self._invalidate()

        self.async_schedule_update_ha_state()
    def _invalidate(self):
        """Invalidate some attributes.

        Resets all connection-derived state after a disconnect so a later
        reconnect starts clean.
        """
        self._chromecast = None
        self.cast_status = None
        self.media_status = None
        self.media_status_received = None
        self.mz_media_status = {}
        self.mz_media_status_received = {}
        self.mz_mgr = None
        self._hass_cast_controller = None
        if self._status_listener is not None:
            self._status_listener.invalidate()
            self._status_listener = None
    def _dynamic_group_invalidate(self):
        """Invalidate some attributes.

        Resets the dynamic-group connection state after a disconnect.
        """
        self._dynamic_group_cast = None
        self.dynamic_group_media_status = None
        self.dynamic_group_media_status_received = None
        if self._dynamic_group_status_listener is not None:
            self._dynamic_group_status_listener.invalidate()
            self._dynamic_group_status_listener = None
# ========== Callbacks ==========
    def new_cast_status(self, cast_status):
        """Handle updates of the cast status (called from the socket thread)."""
        self.cast_status = cast_status
        self.schedule_update_ha_state()
    def new_media_status(self, media_status):
        """Handle updates of the media status (called from the socket thread)."""
        self.media_status = media_status
        # Timestamp used later to extrapolate media position.
        self.media_status_received = dt_util.utcnow()
        self.schedule_update_ha_state()
    def new_connection_status(self, connection_status):
        """Handle updates of connection status."""
        _LOGGER.debug(
            "[%s %s (%s:%s)] Received cast device connection status: %s",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
            connection_status.status,
        )
        if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
            # Final disconnect: drop all connection-derived state.
            self._available = False
            self._invalidate()
            self.schedule_update_ha_state()
            return

        new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
        if new_available != self._available:
            # Connection status callbacks happen often when disconnected.
            # Only update state when availability changed to put less pressure
            # on state machine.
            _LOGGER.debug(
                "[%s %s (%s:%s)] Cast device availability changed: %s",
                self.entity_id,
                self._cast_info.friendly_name,
                self._cast_info.host,
                self._cast_info.port,
                connection_status.status,
            )
            info = self._cast_info
            if info.friendly_name is None and not info.is_audio_group:
                # We couldn't find friendly_name when the cast was added, retry
                self._cast_info = info.fill_out_missing_chromecast_info()
            self._available = new_available
            self.schedule_update_ha_state()
    def new_dynamic_group_media_status(self, media_status):
        """Handle updates of the dynamic group's media status."""
        self.dynamic_group_media_status = media_status
        self.dynamic_group_media_status_received = dt_util.utcnow()
        self.schedule_update_ha_state()
    def new_dynamic_group_connection_status(self, connection_status):
        """Handle updates of the dynamic group's connection status."""
        _LOGGER.debug(
            "[%s %s (%s:%s)] Received dynamic group connection status: %s",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
            connection_status.status,
        )
        if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
            # Final disconnect: drop the group's connection-derived state.
            self._dynamic_group_available = False
            self._dynamic_group_invalidate()
            self.schedule_update_ha_state()
            return

        new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
        if new_available != self._dynamic_group_available:
            # Connection status callbacks happen often when disconnected.
            # Only update state when availability changed to put less pressure
            # on state machine.
            _LOGGER.debug(
                "[%s %s (%s:%s)] Dynamic group availability changed: %s",
                self.entity_id,
                self._cast_info.friendly_name,
                self._cast_info.host,
                self._cast_info.port,
                connection_status.status,
            )
            self._dynamic_group_available = new_available
            self.schedule_update_ha_state()
def multizone_new_media_status(self, group_uuid, media_status):
    """Handle a media status update from an audio group this cast is in."""
    _LOGGER.debug(
        "[%s %s (%s:%s)] Multizone %s media status: %s",
        self.entity_id,
        self._cast_info.friendly_name,
        self._cast_info.host,
        self._cast_info.port,
        group_uuid,
        media_status,
    )
    now = dt_util.utcnow()
    self.mz_media_status[group_uuid] = media_status
    self.mz_media_status_received[group_uuid] = now
    self.schedule_update_ha_state()
# ========== Service Calls ==========
def _media_controller(self):
    """
    Pick the media controller that commands should be sent to.

    Preference order: this cast device itself, then its dynamic group,
    and finally any audio group the device is a member of.
    """
    status = self.media_status
    controller = self._chromecast.media_controller

    own_unusable = status is None or status.player_state == "UNKNOWN"
    if own_unusable and self._dynamic_group_cast is not None:
        status = self.dynamic_group_media_status
        controller = self._dynamic_group_cast.media_controller

    if status is None or status.player_state == "UNKNOWN":
        # Fall back to the first audio group with a usable status.
        for group_uuid, group_status in self.mz_media_status.items():
            if group_status and group_status.player_state != "UNKNOWN":
                controller = self.mz_mgr.get_multizone_mediacontroller(group_uuid)
                break

    return controller
def turn_on(self):
    """Turn on the cast device."""
    if self._chromecast.is_idle:
        # A Chromecast can only be "turned on" by launching an app, so start
        # the splash screen; quit any still-registered app first.
        if self._chromecast.app_id is not None:
            self._chromecast.quit_app()
        self._chromecast.play_media(CAST_SPLASH, pychromecast.STREAM_TYPE_BUFFERED)
def turn_off(self):
    """Turn off the cast device."""
    # Quitting the running app is how a Chromecast is switched "off"
    # (mirrors turn_on, which launches the splash app).
    self._chromecast.quit_app()
def mute_volume(self, mute):
    """Mute the volume.

    :param mute: bool, forwarded to pychromecast's set_volume_muted.
    """
    self._chromecast.set_volume_muted(mute)
def set_volume_level(self, volume):
    """Set volume level, range 0..1.

    :param volume: float in [0, 1], forwarded to pychromecast's set_volume.
    """
    self._chromecast.set_volume(volume)
def media_play(self):
    """Send play command."""
    # Route through whichever controller currently owns playback.
    self._media_controller().play()
def media_pause(self):
    """Send pause command."""
    # Route through whichever controller currently owns playback.
    self._media_controller().pause()
def media_stop(self):
    """Send stop command."""
    # Route through whichever controller currently owns playback.
    self._media_controller().stop()
def media_previous_track(self):
    """Send previous track command."""
    # Route through whichever controller currently owns playback.
    self._media_controller().queue_prev()
def media_next_track(self):
    """Send next track command."""
    # Route through whichever controller currently owns playback.
    self._media_controller().queue_next()
def media_seek(self, position):
    """Seek the media to a specific location."""
    # Route through whichever controller currently owns playback.
    self._media_controller().seek(position)
def play_media(self, media_type, media_id, **kwargs):
    """Play media from a URL.

    :param media_type: Content type forwarded to pychromecast.
    :param media_id: URL of the media to play.
    :param kwargs: Extra service data; accepted but ignored here.
    """
    # We do not want this to be forwarded to a group / dynamic group,
    # so bypass _media_controller() and use the device's own controller.
    self._chromecast.media_controller.play_media(media_id, media_type)
# ========== Properties ==========
@property
def should_poll(self):
    """No polling needed."""
    # State is pushed via the pychromecast status callbacks above, each of
    # which calls schedule_update_ha_state().
    return False
@property
def name(self):
    """Return the name of the device."""
    # Friendly name from discovery; may be filled in later for devices
    # discovered without one (see the connection status handler).
    return self._cast_info.friendly_name
@property
def device_info(self):
    """Return device registry information, or None for cast groups."""
    info = self._cast_info
    if info.model_name == "Google Cast Group":
        # Groups are not registered as physical devices.
        return None
    return {
        "name": info.friendly_name,
        "identifiers": {(CAST_DOMAIN, info.uuid.replace("-", ""))},
        "model": info.model_name,
        "manufacturer": info.manufacturer,
    }
def _media_status(self):
    """
    Return a (media_status, received_at) tuple for this device.

    Preference order: own status, then the dynamic group, then any audio
    group this device is a member of.
    """
    status = self.media_status
    received = self.media_status_received

    own_unusable = status is None or status.player_state == "UNKNOWN"
    if own_unusable and self._dynamic_group_cast is not None:
        status = self.dynamic_group_media_status
        received = self.dynamic_group_media_status_received

    if status is None or status.player_state == "UNKNOWN":
        # Fall back to the first audio group with a usable status.
        for group_uuid, group_status in self.mz_media_status.items():
            if group_status and group_status.player_state != "UNKNOWN":
                status = group_status
                received = self.mz_media_status_received[group_uuid]
                break

    return (status, received)
@property
def state(self):
    """Return the state of the player."""
    status, _ = self._media_status()
    if status is None:
        return None
    if status.player_is_playing:
        return STATE_PLAYING
    if status.player_is_paused:
        return STATE_PAUSED
    if status.player_is_idle:
        return STATE_IDLE
    cast = self._chromecast
    if cast is not None and cast.is_idle:
        return STATE_OFF
    return None
@property
def available(self):
    """Return True if the cast device is connected."""
    # Maintained by the connection status callbacks; only toggled when
    # availability actually changes.
    return self._available
@property
def volume_level(self):
    """Volume level of the media player (0..1)."""
    if not self.cast_status:
        return None
    return self.cast_status.volume_level
@property
def is_volume_muted(self):
    """Boolean if volume is currently muted."""
    if not self.cast_status:
        return None
    return self.cast_status.volume_muted
@property
def media_content_id(self):
    """Content ID of current playing media."""
    status, _ = self._media_status()
    return status.content_id if status else None
@property
def media_content_type(self):
    """Content type of current playing media."""
    status, _ = self._media_status()
    if status is None:
        return None
    if status.media_is_tvshow:
        return MEDIA_TYPE_TVSHOW
    if status.media_is_movie:
        return MEDIA_TYPE_MOVIE
    if status.media_is_musictrack:
        return MEDIA_TYPE_MUSIC
    return None
@property
def media_duration(self):
    """Duration of current playing media in seconds."""
    status, _ = self._media_status()
    return status.duration if status else None
@property
def media_image_url(self):
    """Image url of current playing media."""
    status, _ = self._media_status()
    if status is None:
        return None
    images = status.images
    if not images:
        return None
    first = images[0]
    return first.url if first.url else None
@property
def media_image_remotely_accessible(self) -> bool:
    """If the image url is remotely accessible."""
    # Cast media images are plain URLs (see media_image_url), so they can
    # be fetched directly by clients.
    return True
@property
def media_title(self):
    """Title of current playing media."""
    status, _ = self._media_status()
    return status.title if status else None
@property
def media_artist(self):
    """Artist of current playing media (Music track only)."""
    status, _ = self._media_status()
    return status.artist if status else None
@property
def media_album_name(self):
    """Album of current playing media (Music track only)."""
    status, _ = self._media_status()
    return status.album_name if status else None
@property
def media_album_artist(self):
    """Album artist of current playing media (Music track only)."""
    status, _ = self._media_status()
    return status.album_artist if status else None
@property
def media_track(self):
    """Track number of current playing media (Music track only)."""
    status, _ = self._media_status()
    return status.track if status else None
@property
def media_series_title(self):
    """Return the title of the series of current playing media."""
    status, _ = self._media_status()
    return status.series_title if status else None
@property
def media_season(self):
    """Season of current playing media (TV Show only)."""
    status, _ = self._media_status()
    return status.season if status else None
@property
def media_episode(self):
    """Episode of current playing media (TV Show only)."""
    status, _ = self._media_status()
    return status.episode if status else None
@property
def app_id(self):
    """Return the ID of the current running app."""
    if not self._chromecast:
        return None
    return self._chromecast.app_id
@property
def app_name(self):
    """Name of the current running app."""
    if not self._chromecast:
        return None
    return self._chromecast.app_display_name
@property
def supported_features(self):
    """Flag media player features that are supported."""
    support = SUPPORT_CAST
    status, _ = self._media_status()
    if status:
        # NOTE(review): the original gated BOTH previous and next track on
        # supports_queue_next (there was no supports_queue_prev check);
        # behavior preserved here — confirm against pychromecast's
        # MediaStatus before changing.
        if status.supports_queue_next:
            support |= SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK
        if status.supports_seek:
            support |= SUPPORT_SEEK
    return support
@property
def media_position(self):
    """Position of current playing media in seconds."""
    status, _ = self._media_status()
    if status is None:
        return None
    # Position is only meaningful while the player is active.
    active = (
        status.player_is_playing
        or status.player_is_paused
        or status.player_is_idle
    )
    return status.current_time if active else None
@property
def media_position_updated_at(self):
    """When was the position of the current playing media valid.

    Returns value from homeassistant.util.dt.utcnow().
    """
    _, received = self._media_status()
    return received
@property
def unique_id(self) -> Optional[str]:
    """Return a unique ID."""
    # The Chromecast UUID; also used to match discovery add/remove events.
    return self._cast_info.uuid
async def _async_cast_discovered(self, discover: ChromecastInfo):
    """Handle discovery of new Chromecast."""
    if self._cast_info.uuid is None:
        # We can't match devices without a UUID.
        return
    if self._cast_info.same_dynamic_group(discover):
        _LOGGER.debug("Discovered matching dynamic group: %s", discover)
        await self.async_set_dynamic_group(discover)
        return
    if discover.uuid != self._cast_info.uuid:
        # Some other device was discovered.
        return
    if self.services is None:
        _LOGGER.warning(
            "[%s %s (%s:%s)] Received update for manually added Cast",
            self.entity_id,
            self._cast_info.friendly_name,
            self._cast_info.host,
            self._cast_info.port,
        )
        return
    _LOGGER.debug("Discovered chromecast with same UUID: %s", discover)
    await self.async_set_cast_info(discover)
async def _async_cast_removed(self, discover: ChromecastInfo):
    """Handle removal of Chromecast."""
    if self._cast_info.uuid is None:
        # We can't match devices without a UUID.
        return
    dyn_info = self._dynamic_group_cast_info
    if dyn_info is not None and dyn_info.uuid == discover.uuid:
        _LOGGER.debug("Removed matching dynamic group: %s", discover)
        await self.async_del_dynamic_group()
        return
    if discover.uuid != self._cast_info.uuid:
        # Some other device was removed.
        return
    _LOGGER.debug("Removed chromecast with same UUID: %s", discover)
    await self.async_del_cast_info(discover)
async def _async_stop(self, event):
    """Disconnect socket on Home Assistant stop.

    :param event: Home Assistant stop event; only used as the trigger.
    """
    await self._async_disconnect()
def _handle_signal_show_view(
    self, controller: HomeAssistantController, entity_id: str, view_path: str
):
    """Handle a show view signal."""
    if entity_id != self.entity_id:
        # Signal addressed to a different entity.
        return
    if self._hass_cast_controller is None:
        # Lazily register the Home Assistant cast controller on first use.
        self._hass_cast_controller = controller
        self._chromecast.register_handler(controller)
    self._hass_cast_controller.show_lovelace_view(view_path)
|
import json
from io import BytesIO
from pathlib import Path
from tokenize import tokenize
import click
# Ordered names of the script/notebook sections. Source files must contain
# exactly one `# %%`-style cell marker per entry, in this order
# (enforced by the assert in extract_ee_script).
SECTIONS = [
    'md_buttons',
    'md_install',
    'py_install',
    'md_create',
    'py_create',
    'md_script',
    'py_script',
    'md_display',
    'py_display', ]
@click.command()
@click.option(
    '-t',
    '--template-path',
    type=click.Path(exists=True, readable=True),
    required=True,
    help='Path to pydeck Jupyter Notebook template.')
@click.argument('files', nargs=-1)
def main(files, template_path):
    """Convert each given file into a notebook; report failures and continue."""
    for path in files:
        try:
            convert_file(path, template_path)
        except Exception as e:
            # Best-effort batch conversion: log the failure and move on.
            print(f'Failed on file: {path}')
            print(e)
def convert_file(path, template_path):
    """Convert python script to Jupyter Notebook

    Args:
        - path: Path to _Python_ file
        - template_path: Path to Jupyter Notebook template
    """
    path = Path(path)

    with open(path) as f:
        source_lines = f.readlines()
    pydeck_block = convert_pydeck_block(extract_ee_script(source_lines))

    # Load the pydeck notebook template and locate the placeholder line.
    with open(template_path) as f:
        template_lines = f.readlines()
    matches = [
        ind for ind, line in enumerate(template_lines)
        if 'REPLACE_WITH_CUSTOM_EE_SCRIPT' in line]
    replace_ind = matches[0]

    # Swap the placeholder line for the generated block.
    # Ref: https://stackoverflow.com/a/3748092
    template_lines.pop(replace_ind)
    template_lines[replace_ind:replace_ind] = pydeck_block

    # Write the notebook next to the source file.
    out_path = path.parents[0] / (path.stem + '.ipynb')
    with open(out_path, 'w') as f:
        f.writelines(template_lines)
def extract_ee_script(lines):
    """Return the lines of the `py_script` section of a sectioned source file."""
    # Position of the python-script section within SECTIONS.
    n_section = SECTIONS.index('py_script')

    # Each block starts with a `# %%`-style marker.
    blocks_idx = [
        ind for ind, line in enumerate(lines) if line.strip().startswith('# %')]
    assert len(blocks_idx) == len(SECTIONS), 'wrong number of blocks'

    # The block spans from its own marker to the next one.
    start_idx, end_idx = blocks_idx[n_section:n_section + 2]
    return lines[start_idx:end_idx]
def convert_pydeck_block(lines):
    """Convert an EE script block into JSON-encoded notebook source lines.

    - `Map.addLayer` calls become `EarthEngineLayer` appends
    - `Map.setCenter` calls become a pydeck `ViewState`
    - any other `Map.*` call is dropped
    - every other line is kept unchanged

    Args:
        - lines: iterable of source lines.

    Returns:
        List of JSON string literals, each followed by `,\\n` except the
        last, ready to be spliced into a notebook cell's source array.
        Empty input (or input with only dropped lines) yields [].
    """
    out_lines = []
    for line in lines:
        stripped = line.lower().strip()
        if not stripped.startswith('map'):
            out_lines.append(line)
            continue
        if stripped.startswith('map.addlayer'):
            out_lines.append(handle_add_layer(line))
            continue
        if stripped.startswith('map.setcenter'):
            out_lines.append(handle_set_center(line))
            continue
    # stringify to put in JSON
    out_lines = [json.dumps(l) for l in out_lines]
    # Add ,\n to each line
    out_lines = [l + ',\n' for l in out_lines]
    # Remove , from last line. Guard: previously this raised IndexError
    # when no lines survived conversion.
    if out_lines:
        out_lines[-1] = out_lines[-1][:-2] + '\n'
    return out_lines
def handle_add_layer(line):
    """Convert Map.addLayer to EarthEngineLayer
    """
    # https://geemap.readthedocs.io/en/latest/readme.html#usage
    # Args are: eeObject, visParams, name, shown, opacity
    ee_object, vis_params, _, _, opacity = tokenize_command(line, 5)

    # Keep only the arguments that were actually supplied.
    kwargs = []
    for keyword, value in (
            ('ee_object', ee_object),
            ('vis_params', vis_params),
            ('opacity', opacity)):
        if value:
            kwargs.append(f'{keyword}={value}')

    return f"ee_layers.append(EarthEngineLayer({', '.join(kwargs)}))\n"
def handle_set_center(line):
    """Convert Map.setCenter to pydeck.ViewState
    """
    # https://geemap.readthedocs.io/en/latest/readme.html#usage
    # Args are: lon, lat, zoom
    longitude, latitude, zoom = tokenize_command(line, 3)

    # Keep only the arguments that were actually supplied.
    kwargs = []
    for keyword, value in (
            ('longitude', longitude),
            ('latitude', latitude),
            ('zoom', zoom)):
        if value:
            kwargs.append(f'{keyword}={value}')

    return f"view_state = pdk.ViewState({', '.join(kwargs)})\n"
def tokenize_command(line, n_args):
    """Split a single function-call line into its top-level argument strings.

    Args:
        - line: source line containing one call, e.g. "Map.setCenter(1, 2, 3)"
        - n_args: number of argument slots to expect

    Returns:
        List of n_args strings, each the whitespace-free token text of one
        argument; slots with no corresponding argument remain ''.
    """
    # Use the named OP constant: operator token numbers differ across Python
    # versions, so the previous hard-coded `53` (OP on 3.7 only) silently
    # broke argument detection on 3.8+.
    from token import OP

    tokens = list(tokenize(BytesIO(line.encode()).readline))
    args = [''] * n_args
    args_counter = 0
    depth = 0
    for tok in tokens:
        # Haven't entered the call's parenthesis yet.
        if depth == 0 and not (tok.type == OP and tok.string == '('):
            continue
        if tok.string in ['(', '{', '[']:
            if depth > 0:
                args[args_counter] += tok.string
            depth += 1
            continue
        if tok.string in [')', '}', ']']:
            if depth > 1:
                args[args_counter] += tok.string
            depth -= 1
            continue
        if depth == 1 and tok.string == ',':
            # Top-level comma: advance to the next argument slot.
            args_counter += 1
            continue
        args[args_counter] += tok.string
    return args
def stringify(lines):
    """Wrap lines in double quotes, to allow to be saved in JSON
    """
    return list(map(json.dumps, lines))
def tokenize_line(line):
    """
    Ref: https://stackoverflow.com/a/54375074
    """
    buffer = BytesIO(line.encode())
    return list(tokenize(buffer.readline))
# Entry point: delegate to the click command when run as a script.
if __name__ == '__main__':
    main()
|
import re
from pathlib import Path
from notary.models import LICENSES as SUPPORTED_LICENSES
def guess_license(name):
    """Returns a list of classes that extend the :class:`License` abstract base class.

    :param name: If a string is sent, it checks if it's a substring of any of the supported
        licenses. If it is, all matches will be returned. Otherwise, all supported licenses
        are returned.
    """
    if not isinstance(name, str):
        # Nothing to match against: return everything.
        return SUPPORTED_LICENSES

    needle = name.lower()
    probable, likely = [], []
    for cls in SUPPORTED_LICENSES:
        haystacks = (cls.name.lower(), cls.__name__.lower())
        if any(needle in hay for hay in haystacks):
            probable.append(cls)
        else:
            likely.append(cls)
    return probable if probable else likely
def find_license_files(folder=None):
    """Returns a list of :class:`Path <Path>` objects representing existing LICENSE files
    in the specified directory or the current folder, if none was specified.

    :param folder: String or instance of :class:`Path`
    """
    if isinstance(folder, str):
        folder = Path(folder)
    if folder is None:
        folder = Path(".")
    # Raw string: "\." in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python). Matches names such as LICENSE,
    # license.txt, LICENSE.md (case-insensitive, prefix match).
    rule = re.compile(r"(?i)license(\.[a-zA-Z]*)?")
    return [path for path in folder.glob("*") if path.is_file() and rule.match(path.name)]
|
# Generated by Django 4.0a1 on 2021-09-24 12:02
# Modified on 2021-10-06 to remove any content.
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration whose content was later removed (see header);
    kept as a no-op so the migration graph stays intact."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ("euphro_auth", "0001_initial"),
    ]

    # Intentionally empty: this migration performs no schema changes.
    operations = []
|
# coding: utf-8
"""
eBay Finances API
This API is used to retrieve seller payouts and monetary transaction details related to those payouts. # noqa: E501
OpenAPI spec version: 1.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OrderLineItem(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> swagger model type (consumed by to_dict()).
    swagger_types = {
        'fee_basis_amount': 'Amount',
        'line_item_id': 'str',
        'marketplace_fees': 'list[Fee]'
    }

    # Maps attribute name -> JSON field name in the API definition.
    attribute_map = {
        'fee_basis_amount': 'feeBasisAmount',
        'line_item_id': 'lineItemId',
        'marketplace_fees': 'marketplaceFees'
    }

    def __init__(self, fee_basis_amount=None, line_item_id=None, marketplace_fees=None):  # noqa: E501
        """OrderLineItem - a model defined in Swagger"""  # noqa: E501
        self._fee_basis_amount = None
        self._line_item_id = None
        self._marketplace_fees = None
        self.discriminator = None
        # Route provided values through the property setters.
        if fee_basis_amount is not None:
            self.fee_basis_amount = fee_basis_amount
        if line_item_id is not None:
            self.line_item_id = line_item_id
        if marketplace_fees is not None:
            self.marketplace_fees = marketplace_fees

    @property
    def fee_basis_amount(self):
        """Gets the fee_basis_amount of this OrderLineItem.  # noqa: E501

        :return: The fee_basis_amount of this OrderLineItem.  # noqa: E501
        :rtype: Amount
        """
        return self._fee_basis_amount

    @fee_basis_amount.setter
    def fee_basis_amount(self, fee_basis_amount):
        """Sets the fee_basis_amount of this OrderLineItem.

        :param fee_basis_amount: The fee_basis_amount of this OrderLineItem.  # noqa: E501
        :type: Amount
        """
        self._fee_basis_amount = fee_basis_amount

    @property
    def line_item_id(self):
        """Gets the line_item_id of this OrderLineItem.  # noqa: E501

        The unique identifier of an order line item.  # noqa: E501

        :return: The line_item_id of this OrderLineItem.  # noqa: E501
        :rtype: str
        """
        return self._line_item_id

    @line_item_id.setter
    def line_item_id(self, line_item_id):
        """Sets the line_item_id of this OrderLineItem.

        The unique identifier of an order line item.  # noqa: E501

        :param line_item_id: The line_item_id of this OrderLineItem.  # noqa: E501
        :type: str
        """
        self._line_item_id = line_item_id

    @property
    def marketplace_fees(self):
        """Gets the marketplace_fees of this OrderLineItem.  # noqa: E501

        An array of all fees accrued for the order line item and deducted from a seller payout.  # noqa: E501

        :return: The marketplace_fees of this OrderLineItem.  # noqa: E501
        :rtype: list[Fee]
        """
        return self._marketplace_fees

    @marketplace_fees.setter
    def marketplace_fees(self, marketplace_fees):
        """Sets the marketplace_fees of this OrderLineItem.

        An array of all fees accrued for the order line item and deducted from a seller payout.  # noqa: E501

        :param marketplace_fees: The marketplace_fees of this OrderLineItem.  # noqa: E501
        :type: list[Fee]
        """
        self._marketplace_fees = marketplace_fees

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(OrderLineItem, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OrderLineItem):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
# -*- coding: utf-8 -*-
"""Console script for ctx_to_zooniverse."""
import click
@click.command()
def main(args=None):
    """Console script for ctx_to_zooniverse."""
    # Placeholder body generated by the project template.
    click.echo(
        "Replace this message by putting your code into "
        "ctx_to_zooniverse.cli.main")
    click.echo("See click documentation at http://click.pocoo.org/")
if __name__ == "__main__":
main()
|
# nuScenes dev-kit.
# Code written by Sergi Adipraja Widjaja, 2019.
# + Map mask by Kiwoo Shin, 2019.
# + Methods operating on NuScenesMap and NuScenes by Holger Caesar, 2019.
import json
import os
import random
from typing import Dict, List, Tuple, Optional, Union
import cv2
import descartes
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle, Arrow
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from pyquaternion import Quaternion
from shapely import affinity
from shapely.geometry import Polygon, MultiPolygon, LineString, Point, box
from tqdm import tqdm
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
# Recommended style to use as the plots will show grids.
# NOTE(review): matplotlib >= 3.6 renamed this style to
# 'seaborn-v0_8-whitegrid' — confirm the pinned matplotlib version.
plt.style.use('seaborn-whitegrid')

# Define a map geometry type for polygons and lines.
Geometry = Union[Polygon, LineString]
class NuScenesMap:
    """
    NuScenesMap database class for querying and retrieving information from the semantic maps.
    Before using this class please use the provided tutorial in `map_demo.ipynb`.

    Below you can find the map origins (south eastern corner, in [lat, lon]) for each of the 4 maps in nuScenes:
    boston-seaport: [42.336849169438615, -71.05785369873047]
    singapore-onenorth: [1.2882100868743724, 103.78475189208984]
    singapore-hollandvillage: [1.2993652317780957, 103.78217697143555]
    singapore-queenstown: [1.2782562240223188, 103.76741409301758]

    The dimensions of the maps are as follows ([width, height] in meters):
    singapore-onenorth: [1585.6, 2025.0]
    singapore-hollandvillage: [2808.3, 2922.9]
    singapore-queenstown: [3228.6, 3687.1]
    boston-seaport: [2979.5, 2118.1]

    The rasterized semantic maps (e.g. singapore-onenorth.png) published with nuScenes v1.0 have a scale of 10px/m,
    hence the above numbers are the image dimensions divided by 10.

    We use the same WGS 84 Web Mercator (EPSG:3857) projection as Google Maps/Earth.
    """
def __init__(self,
             dataroot: str = '/data/sets/nuscenes',
             map_name: str = 'singapore-onenorth'):
    """
    Loads the layers, create reverse indices and shortcuts, initializes the explorer class.
    :param dataroot: Path to the layers in the form of a .json file.
    :param map_name: Which map out of `singapore-onenorth`, `singapore-hollandvillage`, `singapore-queenstown`,
        `boston-seaport` that we want to load.
    """
    assert map_name in ['singapore-onenorth', 'singapore-hollandvillage', 'singapore-queenstown', 'boston-seaport']

    self.dataroot = dataroot
    self.map_name = map_name

    # Map JSON lives at <dataroot>/maps/<map_name>.json.
    self.json_fname = os.path.join(self.dataroot, "maps", "{}.json".format(self.map_name))

    self.geometric_layers = ['polygon', 'line', 'node']

    # These are the non-geometric layers which have polygons as the geometric descriptors.
    self.non_geometric_polygon_layers = ['drivable_area', 'road_segment', 'road_block', 'lane', 'ped_crossing',
                                         'walkway', 'stop_line', 'carpark_area']

    # These are the non-geometric layers which have line strings as the geometric descriptors.
    self.non_geometric_line_layers = ['road_divider', 'lane_divider', 'traffic_light']

    self.non_geometric_layers = self.non_geometric_polygon_layers + self.non_geometric_line_layers
    self.layer_names = self.geometric_layers + self.non_geometric_polygon_layers + self.non_geometric_line_layers

    with open(self.json_fname, 'r') as fh:
        self.json_obj = json.load(fh)

    self.canvas_edge = self.json_obj['canvas_edge']

    # Order matters: layers must be loaded before the token index and the
    # record shortcuts can be built on top of them.
    self._load_layers()
    self._make_token2ind()
    self._make_shortcuts()

    self.explorer = NuScenesMapExplorer(self)

    # Parse the map version and print a warning for deprecated maps.
    if 'version' in self.json_obj:
        self.version = self.json_obj['version']
    else:
        self.version = '1.0'
    # NOTE(review): lexicographic string comparison — fine for '1.0'/'1.1'
    # but would misorder e.g. '1.10' vs '1.9'; confirm the version scheme.
    if self.version < '1.1':
        raise Exception('Error: You are using an outdated map version! '
                        'Please go to https://www.nuscenes.org/download to download the latest map!')
def _load_layer(self, layer_name: str) -> List[dict]:
    """
    Returns a list of records corresponding to the layer name.
    :param layer_name: Name of the layer that will be loaded.
    :return: A list of records corresponding to a layer.
    """
    records = self.json_obj[layer_name]
    return records
def _load_layers(self) -> None:
    """ Loads each available layer. """

    # Explicit assignment of layers are necessary to help the IDE determine valid class members.
    # Geometric layers.
    self.polygon = self._load_layer('polygon')
    self.line = self._load_layer('line')
    self.node = self._load_layer('node')
    # Non-geometric polygon layers.
    self.drivable_area = self._load_layer('drivable_area')
    self.road_segment = self._load_layer('road_segment')
    self.road_block = self._load_layer('road_block')
    self.lane = self._load_layer('lane')
    self.ped_crossing = self._load_layer('ped_crossing')
    self.walkway = self._load_layer('walkway')
    self.stop_line = self._load_layer('stop_line')
    self.carpark_area = self._load_layer('carpark_area')
    # Non-geometric line layers.
    self.road_divider = self._load_layer('road_divider')
    self.lane_divider = self._load_layer('lane_divider')
    self.traffic_light = self._load_layer('traffic_light')
def _make_token2ind(self) -> None:
    """ Store the mapping from token to layer index for each layer. """
    self._token2ind = {
        layer_name: {
            member['token']: ind
            for ind, member in enumerate(getattr(self, layer_name))
        }
        for layer_name in self.layer_names
    }
def _make_shortcuts(self) -> None:
    """ Makes the record shortcuts.

    Mutates the layer records in place, copying frequently needed data from
    referenced geometric records onto the referring records.
    """

    # Makes a shortcut between non geometric records to their nodes.
    for layer_name in self.non_geometric_polygon_layers:
        if layer_name == 'drivable_area':  # Drivable area has more than one geometric representation.
            pass
        else:
            for record in self.__dict__[layer_name]:
                # Copy the referenced polygon's exterior nodes and holes
                # onto the record itself.
                polygon_obj = self.get('polygon', record['polygon_token'])
                record['exterior_node_tokens'] = polygon_obj['exterior_node_tokens']
                record['holes'] = polygon_obj['holes']

    for layer_name in self.non_geometric_line_layers:
        for record in self.__dict__[layer_name]:
            record['node_tokens'] = self.get('line', record['line_token'])['node_tokens']

    # Makes a shortcut between stop lines to their cues, there's different cues for different types of stop line.
    # Refer to `_get_stop_line_cue()` for details.
    for record in self.stop_line:
        cue = self._get_stop_line_cue(record)
        record['cue'] = cue

    # Makes a shortcut between lanes to their lane divider segment nodes.
    for record in self.lane:
        record['left_lane_divider_segment_nodes'] = [self.get('node', segment['node_token']) for segment in
                                                     record['left_lane_divider_segments']]
        record['right_lane_divider_segment_nodes'] = [self.get('node', segment['node_token']) for segment in
                                                      record['right_lane_divider_segments']]
def _get_stop_line_cue(self, stop_line_record: dict) -> List[dict]:
    """
    Get the different cues for different types of stop lines.
    :param stop_line_record: A single stop line record.
    :return: The cue for that stop line.
    """
    stop_type = stop_line_record['stop_line_type']
    if stop_type in ['PED_CROSSING', 'TURN_STOP']:
        return [self.get('ped_crossing', token) for token in stop_line_record['ped_crossing_tokens']]
    if stop_type in ['STOP_SIGN', 'YIELD']:
        # Stop signs and yields have no separate cue records.
        return []
    if stop_type == 'TRAFFIC_LIGHT':
        return [self.get('traffic_light', token) for token in stop_line_record['traffic_light_tokens']]
    # NOTE(review): any other stop_line_type implicitly yields None despite
    # the declared List[dict] return type — preserved as-is.
def get(self, layer_name: str, token: str) -> dict:
    """
    Returns a record from the layer in constant runtime.
    :param layer_name: Name of the layer that we are interested in.
    :param token: Token of the record.
    :return: A single layer record.
    """
    assert layer_name in self.layer_names, "Layer {} not found".format(layer_name)

    layer = getattr(self, layer_name)
    return layer[self.getind(layer_name, token)]
def getind(self, layer_name: str, token: str) -> int:
    """
    This returns the index of the record in a layer in constant runtime.
    :param layer_name: Name of the layer we are interested in.
    :param token: Token of the record.
    :return: The index of the record in the layer, layer is an array.
    """
    token_to_index = self._token2ind[layer_name]
    return token_to_index[token]
def render_record(self,
                  layer_name: str,
                  token: str,
                  alpha: float = 0.5,
                  figsize: Tuple[int, int] = (15, 15),
                  other_layers: List[str] = None) -> Tuple[Figure, Tuple[Axes, Axes]]:
    """
    Render a single map record; delegates to the explorer. By default also
    renders `drivable_area`, `lane` and `walkway` unless `other_layers` is given.
    :param layer_name: Name of the layer that we are interested in.
    :param token: Token of the record that you want to render.
    :param alpha: The opacity of each layer that gets rendered.
    :param figsize: Size of the whole figure.
    :param other_layers: What other layers to render aside from the one specified in `layer_name`.
    :return: The matplotlib figure and axes of the rendered layers.
    """
    return self.explorer.render_record(
        layer_name, token, alpha, figsize, other_layers)
def render_layers(self,
                  layer_names: List[str],
                  alpha: float = 0.5,
                  figsize: Tuple[int, int] = (15, 15),
                  tokens: List[str] = None) -> Tuple[Figure, Axes]:
    """
    Render a list of layer names; delegates to the explorer.
    :param layer_names: A list of layer names.
    :param alpha: The opacity of each layer that gets rendered.
    :param figsize: Size of the whole figure.
    :param tokens: Optional list of tokens to render. None means all tokens are rendered.
    :return: The matplotlib figure and axes of the rendered layers.
    """
    return self.explorer.render_layers(
        layer_names, alpha, figsize, tokens)
def render_map_patch(self,
                     box_coords: Tuple[float, float, float, float],
                     layer_names: List[str] = None,
                     alpha: float = 0.5,
                     figsize: Tuple[int, int] = (15, 15),
                     render_egoposes_range: bool = True,
                     render_legend: bool = True) -> Tuple[Figure, Axes]:
    """
    Renders a rectangular patch specified by `box_coords`; delegates to the
    explorer. By default renders all layers.
    :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
    :param layer_names: All the non geometric layers that we want to render.
    :param alpha: The opacity of each layer.
    :param figsize: Size of the whole figure.
    :param render_egoposes_range: Whether to render a rectangle around all ego poses.
    :param render_legend: Whether to render the legend of map layers.
    :return: The matplotlib figure and axes of the rendered layers.
    """
    return self.explorer.render_map_patch(
        box_coords, layer_names, alpha, figsize,
        render_egoposes_range, render_legend)
def render_map_in_image(self,
                        nusc: NuScenes,
                        sample_token: str,
                        camera_channel: str = 'CAM_FRONT',
                        alpha: float = 0.3,
                        patch_radius: float = 10000,
                        min_polygon_area: float = 1000,
                        render_behind_cam: bool = True,
                        render_outside_im: bool = True,
                        layer_names: List[str] = None,
                        verbose: bool = True,
                        out_path: str = None) -> None:
    """
    Render a nuScenes camera image and overlay the polygons for the specified
    map layers; delegates to the explorer. Note that the projections are not
    always accurate as the localization is in 2d.
    :param nusc: The NuScenes instance to load the image from.
    :param sample_token: The image's corresponding sample_token.
    :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'.
    :param alpha: The transparency value of the layers to render in [0, 1].
    :param patch_radius: The radius in meters around the ego car in which to select map records.
    :param min_polygon_area: Minimum area a polygon needs to have to be rendered.
    :param render_behind_cam: Whether to render polygons where any point is behind the camera.
    :param render_outside_im: Whether to render polygons where any point is outside the image.
    :param layer_names: The names of the layers to render, e.g. ['lane'].
        If set to None, the recommended setting will be used.
    :param verbose: Whether to print to stdout.
    :param out_path: Optional path to save the rendered figure to disk.
    """
    self.explorer.render_map_in_image(
        nusc, sample_token, camera_channel=camera_channel, alpha=alpha,
        patch_radius=patch_radius, min_polygon_area=min_polygon_area,
        render_behind_cam=render_behind_cam,
        render_outside_im=render_outside_im,
        layer_names=layer_names, verbose=verbose, out_path=out_path)
def render_egoposes_on_fancy_map(self,
                                 nusc: NuScenes,
                                 scene_tokens: List = None,
                                 verbose: bool = True,
                                 out_path: str = None,
                                 render_egoposes: bool = True,
                                 render_egoposes_range: bool = True,
                                 render_legend: bool = True) -> np.ndarray:
    """
    Draw every ego pose of the given scenes on the expansion-pack map
    (roughly 40 poses per scene).
    :param nusc: The NuScenes instance to load the ego poses from.
    :param scene_tokens: Optional list of scene tokens for the current map location.
    :param verbose: Whether to show status messages and a progress bar.
    :param out_path: Optional path to save the rendered figure to disk.
    :param render_egoposes: Whether to render the ego poses themselves.
    :param render_egoposes_range: Whether to render a rectangle around all ego poses.
    :param render_legend: Whether to render the legend of map layers.
    :return: <np.float32: n, 2>. Matrix with n ego poses in global map coordinates.
    """
    # Delegate to the explorer, which owns the plotting logic.
    return self.explorer.render_egoposes_on_fancy_map(
        nusc,
        scene_tokens=scene_tokens,
        verbose=verbose,
        out_path=out_path,
        render_egoposes=render_egoposes,
        render_egoposes_range=render_egoposes_range,
        render_legend=render_legend)
def render_map_mask(self,
                    patch_box: Tuple[float, float, float, float],
                    patch_angle: float,
                    layer_names: List[str] = None,
                    canvas_size: Tuple[int, int] = (100, 100),
                    figsize: Tuple[int, int] = (15, 15),
                    n_row: int = 2) -> Tuple[Figure, List[Axes]]:
    """
    Render the binary masks of the patch given by patch_box and patch_angle.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees.
    :param layer_names: Names of the layers to return.
    :param canvas_size: Size of the output mask (h, w).
    :param figsize: Size of the figure.
    :param n_row: Number of rows with plots.
    :return: The matplotlib figure and a list of axes of the rendered layers.
    """
    explorer = self.explorer
    return explorer.render_map_mask(patch_box, patch_angle, layer_names, canvas_size,
                                    figsize=figsize, n_row=n_row)
def get_map_mask(self,
                 patch_box: Tuple[float, float, float, float],
                 patch_angle: float,
                 layer_names: List[str] = None,
                 canvas_size: Tuple[int, int] = (100, 100)) -> np.ndarray:
    """
    Retrieve the stacked mask layers for the requested patch.
    :param patch_box: (x_center, y_center, height, width) of the patch; None selects the whole map.
    :param patch_angle: Patch orientation in degrees; 0 corresponds to north-facing.
    :param layer_names: Layers to extract, or None for every non-geometric layer.
    :param canvas_size: Output mask size (h, w); None uses the default 10px/m resolution.
    :return: Stacked numpy array of size [c x h x w], one channel per layer.
    """
    explorer = self.explorer
    return explorer.get_map_mask(patch_box, patch_angle, layer_names, canvas_size)
def get_map_geom(self,
                 patch_box: Tuple[float, float, float, float],
                 patch_angle: float,
                 layer_names: List[str]) -> List[Tuple[str, List[Geometry]]]:
    """
    Collect the geometries of the requested layers inside a patch.
    Geometries are unscaled but aligned with the patch angle.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees; 0 corresponds to north-facing.
    :param layer_names: Layers to extract, or None for all non-geometric layers.
    :return: List of (layer name, geometries) pairs.
    """
    geoms = self.explorer.get_map_geom(patch_box, patch_angle, layer_names)
    return geoms
def get_records_in_patch(self,
                         box_coords: Tuple[float, float, float, float],
                         layer_names: List[str] = None,
                         mode: str = 'intersect') -> Dict[str, List[str]]:
    """
    Collect the tokens of every record touching a rectangular patch.
    :param box_coords: Patch coordinates (x_min, y_min, x_max, y_max).
    :param layer_names: Layers to search; None searches all non-geometric layers.
    :param mode: 'intersect' returns records overlapping the patch,
                 'within' only records fully contained in it.
    :return: Dictionary of layer_name - tokens pairs.
    """
    matches = self.explorer.get_records_in_patch(box_coords, layer_names, mode)
    return matches
def is_record_in_patch(self,
                       layer_name: str,
                       token: str,
                       box_coords: Tuple[float, float, float, float],
                       mode: str = 'intersect') -> bool:
    """
    Check whether one record touches a rectangular patch.
    :param layer_name: The layer name of the record.
    :param token: The record token.
    :param box_coords: Patch coordinates (x_min, y_min, x_max, y_max).
    :param mode: 'intersect' tests for overlap with the patch,
                 'within' tests for full containment.
    :return: True when the record intersects / lies within the patch.
    """
    inside = self.explorer.is_record_in_patch(layer_name, token, box_coords, mode)
    return inside
def layers_on_point(self, x: float, y: float) -> Dict[str, str]:
    """
    Find every polygonal layer covering a given point.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :return: Mapping of layer name to the token(s) of the covering record(s).
    """
    covering = self.explorer.layers_on_point(x, y)
    return covering
def record_on_point(self, x: float, y: float, layer_name: str) -> str:
    """
    Find which record of one layer covers a given point.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :param layer_name: The non geometric polygonal layer to query.
    :return: Token of the covering record.
    """
    token = self.explorer.record_on_point(x, y, layer_name)
    return token
def extract_polygon(self, polygon_token: str) -> Polygon:
    """
    Build a shapely Polygon from a polygon record token.
    :param polygon_token: The token of the polygon record.
    :return: The record's geometry as a shapely Polygon.
    """
    shape = self.explorer.extract_polygon(polygon_token)
    return shape
def extract_line(self, line_token: str) -> LineString:
    """
    Build a shapely LineString from a line record token.
    :param line_token: The token of the line record.
    :return: The record's geometry as a shapely LineString.
    """
    shape = self.explorer.extract_line(line_token)
    return shape
def get_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]:
    """
    Look up the bounding box of a non geometric record's geometry.
    :param layer_name: Name of the layer of interest.
    :param token: Token of the record.
    :return: (min_x, min_y, max_x, max_y) of the record's line representation.
    """
    bounds = self.explorer.get_bounds(layer_name, token)
    return bounds
def render_next_roads(self,
                      x: float,
                      y: float,
                      alpha: float = 0.5,
                      figsize: Tuple[int, int] = (15, 15)) -> None:
    """
    Render the roads reachable from a point of interest.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :param alpha: Opacity of each rendered layer.
    :param figsize: Size of the whole figure.
    """
    # Rendering itself lives in the explorer.
    self.explorer.render_next_roads(x, y, alpha, figsize)
def get_next_roads(self, x: float, y: float) -> Dict[str, List[str]]:
    """
    Look up the roads reachable from a point of interest.
    Covers the road_segment, road_block and lane layers.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :return: Dictionary of layer_name - tokens pairs.
    """
    road_layers = ['road_segment', 'road_block', 'lane']
    on_point = self.explorer.layers_on_point(x, y)
    tokens_by_layer = {name: on_point[name] for name in road_layers}

    # Pick the first road layer that actually covers the query point.
    current_layer = next((name for name in road_layers if tokens_by_layer[name] != ''), None)
    assert current_layer is not None, 'Error: No suitable layer in the specified point location!'
    current_token = tokens_by_layer[current_layer]

    # Gather every road record overlapping the bounding box of the current road.
    bounds = self.explorer.get_bounds(current_layer, current_token)
    candidates = self.explorer.get_records_in_patch(bounds, road_layers, mode='intersect')

    is_segment = current_layer == 'road_segment'
    if is_segment:
        # Road segments carry no direction: a candidate is connected when it
        # shares at least one exterior node with the current record.
        ref_nodes = set(self.get(current_layer, current_token)['exterior_node_tokens'])
    else:
        # Lanes and road blocks point at the next road via their edge line:
        # a candidate is connected when its exterior contains every edge-line node.
        edge_token = self.get(current_layer, current_token)['to_edge_line_token']
        ref_nodes = set(self.get('line', edge_token)['node_tokens'])

    result = {name: [] for name in road_layers}
    for name in road_layers:
        for token in candidates[name]:
            # Never report the record the query point is already on.
            if token == tokens_by_layer[name]:
                continue
            nodes = self.get(name, token)['exterior_node_tokens']
            connected = bool(ref_nodes.intersection(nodes)) if is_segment \
                else ref_nodes.issubset(nodes)
            if connected:
                result[name].append(token)
    return result
class NuScenesMapExplorer:
""" Helper class to explore the nuScenes map data. """
def __init__(self,
             map_api: NuScenesMap,
             representative_layers: Tuple[str] = ('drivable_area', 'lane', 'walkway'),
             color_map: dict = None):
    """
    :param map_api: NuScenesMap database class.
    :param representative_layers: Layers considered representative of the whole mapping data.
    :param color_map: Mapping of layer name to hex color; a default palette is used when None.
    """
    self.map_api = map_api
    self.representative_layers = representative_layers
    # Build the default palette per call to avoid a mutable default argument.
    self.color_map = color_map if color_map is not None else {
        'drivable_area': '#a6cee3',
        'road_segment': '#1f78b4',
        'road_block': '#b2df8a',
        'lane': '#33a02c',
        'ped_crossing': '#fb9a99',
        'walkway': '#e31a1c',
        'stop_line': '#fdbf6f',
        'carpark_area': '#ff7f00',
        'road_divider': '#cab2d6',
        'lane_divider': '#6a3d9a',
        'traffic_light': '#7e772e',
    }
    # Canvas extent in map coordinates; the origin sits at the bottom-left corner.
    self.canvas_min_x = 0
    self.canvas_min_y = 0
    self.canvas_max_x = self.map_api.canvas_edge[0]
    self.canvas_max_y = self.map_api.canvas_edge[1]
    self.canvas_aspect_ratio = (self.canvas_max_x - self.canvas_min_x) / (self.canvas_max_y - self.canvas_min_y)
def render_map_mask(self,
                    patch_box: Tuple[float, float, float, float],
                    patch_angle: float,
                    layer_names: List[str],
                    canvas_size: Tuple[int, int],
                    figsize: Tuple[int, int],
                    n_row: int = 2) -> Tuple[Figure, List[Axes]]:
    """
    Render map mask of the patch specified by patch_box and patch_angle.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees.
    :param layer_names: A list of layer names to be extracted, or None for all non-geometric layers.
    :param canvas_size: Size of the output mask (h, w), or None for the default resolution.
    :param figsize: Size of the figure.
    :param n_row: Number of rows with plots.
    :return: The matplotlib figure and a list of axes of the rendered layers.
    """
    if layer_names is None:
        layer_names = self.map_api.non_geometric_layers
    # Rasterize one binary mask per requested layer.
    map_mask = self.get_map_mask(patch_box, patch_angle, layer_names, canvas_size)
    # If no canvas_size is specified, retrieve the default from the output of get_map_mask.
    if canvas_size is None:
        canvas_size = map_mask.shape[1:]
    fig = plt.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(0, canvas_size[1])
    ax.set_ylim(0, canvas_size[0])
    # Lay the per-layer masks out on a grid with n_row rows.
    n_col = len(map_mask) // n_row
    gs = gridspec.GridSpec(n_row, n_col)
    gs.update(wspace=0.025, hspace=0.05)
    for i in range(len(map_mask)):
        r = i // n_col
        c = i - r * n_col
        subax = plt.subplot(gs[r, c])
        subax.imshow(map_mask[i], origin='lower')
        # NOTE(review): canvas_size is (h, w) yet the x position uses index 0 and the
        # y position index 1, the opposite of the xlim/ylim above — confirm intended.
        subax.text(canvas_size[0] * 0.5, canvas_size[1] * 1.1, layer_names[i])
        subax.grid(False)
    # fig.axes includes both the hidden full-figure axes and every subplot.
    return fig, fig.axes
def get_map_geom(self,
                 patch_box: Tuple[float, float, float, float],
                 patch_angle: float,
                 layer_names: List[str]) -> List[Tuple[str, List[Geometry]]]:
    """
    Collect the geometries of the requested layers inside a patch.
    Geometries are unscaled but aligned with the patch angle.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees; 0 corresponds to north-facing.
    :param layer_names: Layers to extract, or None for all non-geometric layers.
    :return: List of (layer name, geometries) pairs.
    """
    if layer_names is None:
        layer_names = self.map_api.non_geometric_layers
    # Extract per-layer geometry, dropping layers that yield nothing.
    extracted = ((name, self._get_layer_geom(patch_box, patch_angle, name))
                 for name in layer_names)
    return [(name, geom) for name, geom in extracted if geom is not None]
def map_geom_to_mask(self,
                     map_geom: List[Tuple[str, List[Geometry]]],
                     local_box: Tuple[float, float, float, float],
                     canvas_size: Tuple[int, int]) -> np.ndarray:
    """
    Rasterize a list of layer geometries into stacked binary masks.
    :param map_geom: List of (layer name, geometries) pairs.
    :param local_box: Local patch box (x_center, y_center, height, width),
                      typically with x_center = y_center = 0.
    :param canvas_size: Size of the output mask (h, w).
    :return: Stacked numpy array of size [c x h x w], one channel per layer.
    """
    # Rasterize each layer, skipping layers that produce no mask.
    rendered = (self._layer_geom_to_mask(name, geom, local_box, canvas_size)
                for name, geom in map_geom)
    return np.array([mask for mask in rendered if mask is not None])
def get_map_mask(self,
                 patch_box: Tuple[float, float, float, float],
                 patch_angle: float,
                 layer_names: List[str] = None,
                 canvas_size: Tuple[int, int] = (100, 100)) -> np.ndarray:
    """
    Retrieve the stacked mask layers for the requested patch.
    :param patch_box: (x_center, y_center, height, width) of the patch; None selects the whole map.
    :param patch_angle: Patch orientation in degrees; 0 corresponds to north-facing.
    :param layer_names: Layers to extract, or None for every non-geometric layer.
    :param canvas_size: Output mask size (h, w); None uses the default 10px/m resolution.
    :return: Stacked numpy array of size [c x h x w], one channel per layer.
    """
    # Known extent [width, height] in meters of each released map; needed when
    # patch_box or canvas_size default to the full map / default resolution.
    map_dims_by_name = {
        'singapore-onenorth': [1585.6, 2025.0],
        'singapore-hollandvillage': [2808.3, 2922.9],
        'singapore-queenstown': [3228.6, 3687.1],
        'boston-seaport': [2979.5, 2118.1],
    }
    if self.map_api.map_name not in map_dims_by_name:
        raise Exception('Error: Invalid map!')
    map_dims = map_dims_by_name[self.map_api.map_name]
    # If None, return the entire map.
    if patch_box is None:
        patch_box = [map_dims[0] / 2, map_dims[1] / 2, map_dims[1], map_dims[0]]
    # If None, return all geometric layers.
    if layer_names is None:
        layer_names = self.map_api.non_geometric_layers
    # If None, render the patch at the original scale of 10px/m.
    if canvas_size is None:
        pixels_per_meter = 10
        raw_size = np.array((patch_box[2], patch_box[3])) * pixels_per_meter
        canvas_size = tuple(np.round(raw_size).astype(np.int32))
    # Extract geometry, then rasterize it in local coordinates: the patch
    # center is shifted to (0, 0).
    map_geom = self.get_map_geom(patch_box, patch_angle, layer_names)
    local_box = (0.0, 0.0, patch_box[2], patch_box[3])
    map_mask = self.map_geom_to_mask(map_geom, local_box, canvas_size)
    assert np.all(map_mask.shape[1:] == canvas_size)
    return map_mask
def render_record(self,
                  layer_name: str,
                  token: str,
                  alpha: float = 0.5,
                  figsize: Tuple[int, int] = (15, 15),
                  other_layers: List[str] = None) -> Tuple[Figure, Tuple[Axes, Axes]]:
    """
    Render a single map record.
    By default will also render 3 layers which are `drivable_area`, `lane`, and `walkway` unless specified by
    `other_layers`.
    :param layer_name: Name of the layer that we are interested in.
    :param token: Token of the record that you want to render.
    :param alpha: The opacity of each layer that gets rendered.
    :param figsize: Size of the whole figure.
    :param other_layers: What other layers to render aside from the one specified in `layer_name`.
    :return: The matplotlib figure and axes (global view, local view) of the rendered layers.
    :raises ValueError: If any entry of `other_layers` is not a non-geometric layer.
    """
    if other_layers is None:
        other_layers = list(self.representative_layers)
    for other_layer in other_layers:
        if other_layer not in self.map_api.non_geometric_layers:
            # Bug fix: report the offending entry of other_layers, not layer_name.
            raise ValueError("{} is not a non geometric layer".format(other_layer))
    x1, y1, x2, y2 = self.map_api.get_bounds(layer_name, token)
    local_width = x2 - x1
    local_height = y2 - y1
    assert local_height > 0, 'Error: Map has 0 height!'
    local_aspect_ratio = local_width / local_height
    # We obtained the values 0.65 and 0.66 by trials.
    fig = plt.figure(figsize=figsize)
    global_ax = fig.add_axes([0, 0, 0.65, 0.65 / self.canvas_aspect_ratio])
    local_ax = fig.add_axes([0.66, 0.66 / self.canvas_aspect_ratio, 0.34, 0.34 / local_aspect_ratio])
    # To make sure the sequence of the layer overlays is always consistent after typesetting set().
    random.seed('nutonomy')
    layer_names = other_layers + [layer_name]
    layer_names = list(set(layer_names))
    for layer in layer_names:
        self._render_layer(global_ax, layer, alpha)
    for layer in layer_names:
        self._render_layer(local_ax, layer, alpha)
    if layer_name == 'drivable_area':
        # Bad output aesthetically if we add spacing between the objects and the axes for drivable area.
        local_ax_xlim = (x1, x2)
        local_ax_ylim = (y1, y2)
    else:
        # Add some spacing between the object and the axes.
        local_ax_xlim = (x1 - local_width / 3, x2 + local_width / 3)
        local_ax_ylim = (y1 - local_height / 3, y2 + local_height / 3)
    # Draws the rectangular patch on the local_ax.
    local_ax.add_patch(Rectangle((x1, y1), local_width, local_height, linestyle='-.', color='red', fill=False,
                                 lw=2))
    local_ax.set_xlim(*local_ax_xlim)
    local_ax.set_ylim(*local_ax_ylim)
    local_ax.set_title('Local View')
    global_ax.set_xlim(self.canvas_min_x, self.canvas_max_x)
    global_ax.set_ylim(self.canvas_min_y, self.canvas_max_y)
    global_ax.set_title('Global View')
    global_ax.legend()
    # Adds the zoomed in effect to the plot.
    mark_inset(global_ax, local_ax, loc1=2, loc2=4, color='black')
    return fig, (global_ax, local_ax)
def render_layers(self,
                  layer_names: List[str],
                  alpha: float,
                  figsize: Tuple[int, int],
                  tokens: List[str] = None) -> Tuple[Figure, Axes]:
    """
    Render a set of map layers onto one axes spanning the full canvas.
    :param layer_names: Names of the layers to draw.
    :param alpha: Opacity of every layer.
    :param figsize: Size of the whole figure.
    :param tokens: Optional subset of record tokens to draw; None draws everything.
    :return: The matplotlib figure and axes of the rendered layers.
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1 / self.canvas_aspect_ratio])
    ax.set_xlim(self.canvas_min_x, self.canvas_max_x)
    ax.set_ylim(self.canvas_min_y, self.canvas_max_y)
    # Drop duplicates so no layer is painted twice.
    for name in set(layer_names):
        self._render_layer(ax, name, alpha, tokens)
    ax.legend()
    return fig, ax
def render_map_patch(self,
                     box_coords: Tuple[float, float, float, float],
                     layer_names: List[str] = None,
                     alpha: float = 0.5,
                     figsize: Tuple[int, int] = (15, 15),
                     render_egoposes_range: bool = True,
                     render_legend: bool = True) -> Tuple[Figure, Axes]:
    """
    Render the rectangular patch given by `box_coords`; all layers by default.
    :param box_coords: Patch coordinates (x_min, y_min, x_max, y_max).
    :param layer_names: Non geometric layers to draw, or None for all of them.
    :param alpha: Opacity of each layer.
    :param figsize: Size of the whole figure.
    :param render_egoposes_range: Whether to render a rectangle around all ego poses.
    :param render_legend: Whether to render the legend of map layers.
    :return: The matplotlib figure and axes of the rendered layers.
    """
    x_min, y_min, x_max, y_max = box_coords
    width = x_max - x_min
    height = y_max - y_min
    assert height > 0, 'Error: Map patch has 0 height!'
    if layer_names is None:
        layer_names = self.map_api.non_geometric_layers
    fig = plt.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1 / (width / height)])
    for name in layer_names:
        self._render_layer(ax, name, alpha)
    # Leave a margin around the patch, capped at 50 m (x) / 10 m (y).
    x_margin = np.minimum(width / 4, 50)
    y_margin = np.minimum(height / 4, 10)
    ax.set_xlim(x_min - x_margin, x_max + x_margin)
    ax.set_ylim(y_min - y_margin, y_max + y_margin)
    if render_egoposes_range:
        # Outline the patch and annotate its dimensions in meters.
        ax.add_patch(Rectangle((x_min, y_min), width, height, fill=False, linestyle='-.', color='red',
                               lw=2))
        ax.text(x_min + width / 100, y_min + height / 2, "%g m" % height,
                fontsize=14, weight='bold')
        ax.text(x_min + width / 2, y_min + height / 100, "%g m" % width,
                fontsize=14, weight='bold')
    if render_legend:
        ax.legend(frameon=True, loc='upper right')
    return fig, ax
def render_map_in_image(self,
                        nusc: NuScenes,
                        sample_token: str,
                        camera_channel: str = 'CAM_FRONT',
                        alpha: float = 0.3,
                        patch_radius: float = 10000,
                        min_polygon_area: float = 1000,
                        render_behind_cam: bool = True,
                        render_outside_im: bool = True,
                        layer_names: List[str] = None,
                        verbose: bool = True,
                        out_path: str = None) -> None:
    """
    Render a nuScenes camera image and overlay the polygons for the specified map layers.
    Note that the projections are not always accurate as the localization is in 2d.
    :param nusc: The NuScenes instance to load the image from.
    :param sample_token: The image's corresponding sample_token.
    :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'.
    :param alpha: The transparency value of the layers to render in [0, 1].
    :param patch_radius: The radius in meters around the ego car in which to select map records.
    :param min_polygon_area: Minimum area a polygon needs to have to be rendered.
    :param render_behind_cam: Whether to render polygons where any point is behind the camera.
    :param render_outside_im: Whether to render polygons where any point is outside the image.
    :param layer_names: The names of the layers to render, e.g. ['lane'].
        If set to None, the recommended setting will be used.
    :param verbose: Whether to print to stdout.
    :param out_path: Optional path to save the rendered figure to disk.
    """
    # Near plane just above zero, so points behind the camera can be clipped
    # to it instead of projecting to infinity.
    near_plane = 1e-8
    if verbose:
        print('Warning: Note that the projections are not always accurate as the localization is in 2d.')
    # Default layers.
    if layer_names is None:
        layer_names = ['road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line', 'carpark_area']
    # Check layers whether we can render them.
    for layer_name in layer_names:
        assert layer_name in self.map_api.non_geometric_polygon_layers, \
            'Error: Can only render non-geometry polygons: %s' % layer_names
    # Check that NuScenesMap was loaded for the correct location.
    sample_record = nusc.get('sample', sample_token)
    scene_record = nusc.get('scene', sample_record['scene_token'])
    log_record = nusc.get('log', scene_record['log_token'])
    log_location = log_record['location']
    assert self.map_api.map_name == log_location, \
        'Error: NuScenesMap loaded for location %s, should be %s!' % (self.map_api.map_name, log_location)
    # Grab the front camera image and intrinsics.
    cam_token = sample_record['data'][camera_channel]
    cam_record = nusc.get('sample_data', cam_token)
    cam_path = nusc.get_sample_data_path(cam_token)
    im = Image.open(cam_path)
    im_size = im.size
    cs_record = nusc.get('calibrated_sensor', cam_record['calibrated_sensor_token'])
    cam_intrinsic = np.array(cs_record['camera_intrinsic'])
    # Retrieve the current map.
    poserecord = nusc.get('ego_pose', cam_record['ego_pose_token'])
    ego_pose = poserecord['translation']
    # Square search window of side 2 * patch_radius centered on the ego vehicle.
    box_coords = (
        ego_pose[0] - patch_radius,
        ego_pose[1] - patch_radius,
        ego_pose[0] + patch_radius,
        ego_pose[1] + patch_radius,
    )
    records_in_patch = self.get_records_in_patch(box_coords, layer_names, 'intersect')
    # Init axes.
    fig = plt.figure(figsize=(9, 16))
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(0, im_size[0])
    ax.set_ylim(0, im_size[1])
    ax.imshow(im)
    # Retrieve and render each record.
    for layer_name in layer_names:
        for token in records_in_patch[layer_name]:
            record = self.map_api.get(layer_name, token)
            # drivable_area records hold several polygons; other layers hold one.
            if layer_name == 'drivable_area':
                polygon_tokens = record['polygon_tokens']
            else:
                polygon_tokens = [record['polygon_token']]
            for polygon_token in polygon_tokens:
                polygon = self.map_api.extract_polygon(polygon_token)
                # Convert polygon nodes to pointcloud with 0 height.
                points = np.array(polygon.exterior.xy)
                points = np.vstack((points, np.zeros((1, points.shape[1]))))
                # Transform into the ego vehicle frame for the timestamp of the image.
                points = points - np.array(poserecord['translation']).reshape((-1, 1))
                points = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, points)
                # Transform into the camera.
                points = points - np.array(cs_record['translation']).reshape((-1, 1))
                points = np.dot(Quaternion(cs_record['rotation']).rotation_matrix.T, points)
                # Remove points that are partially behind the camera.
                depths = points[2, :]
                behind = depths < near_plane
                if np.all(behind):
                    continue
                if render_behind_cam:
                    # Perform clipping on polygons that are partially behind the camera.
                    points = NuScenesMapExplorer._clip_points_behind_camera(points, near_plane)
                elif np.any(behind):
                    # Otherwise ignore any polygon that is partially behind the camera.
                    continue
                # Ignore polygons with less than 3 points after clipping.
                if len(points) == 0 or points.shape[1] < 3:
                    continue
                # Grab the depths before performing the projection (z axis points away from the camera).
                depths = points[2, :]
                # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
                points = view_points(points, cam_intrinsic, normalize=True)
                # Skip polygons where all points are outside the image.
                # Leave a margin of 1 pixel for aesthetic reasons.
                inside = np.ones(depths.shape[0], dtype=bool)
                inside = np.logical_and(inside, points[0, :] > 1)
                inside = np.logical_and(inside, points[0, :] < im.size[0] - 1)
                inside = np.logical_and(inside, points[1, :] > 1)
                inside = np.logical_and(inside, points[1, :] < im.size[1] - 1)
                if render_outside_im:
                    # Only skip when every point lies outside the image.
                    if np.all(np.logical_not(inside)):
                        continue
                else:
                    # Skip as soon as any point lies outside the image.
                    if np.any(np.logical_not(inside)):
                        continue
                points = points[:2, :]
                points = [(p0, p1) for (p0, p1) in zip(points[0], points[1])]
                polygon_proj = Polygon(points)
                # Filter small polygons
                if polygon_proj.area < min_polygon_area:
                    continue
                label = layer_name
                ax.add_patch(descartes.PolygonPatch(polygon_proj, fc=self.color_map[layer_name], alpha=alpha,
                                                    label=label))
    # Display the image.
    plt.axis('off')
    # Flip the y axis so the image is not drawn upside down.
    ax.invert_yaxis()
    if out_path is not None:
        plt.tight_layout()
        plt.savefig(out_path, bbox_inches='tight', pad_inches=0)
def render_egoposes_on_fancy_map(self,
                                 nusc: NuScenes,
                                 scene_tokens: List = None,
                                 verbose: bool = True,
                                 out_path: str = None,
                                 render_egoposes: bool = True,
                                 render_egoposes_range: bool = True,
                                 render_legend: bool = True) -> np.ndarray:
    """
    Renders each ego pose of a list of scenes on the map (around 40 poses per scene).
    This method is heavily inspired by NuScenes.render_egoposes_on_map(), but uses the map expansion pack maps.
    Note that the maps are constantly evolving, whereas we only released a single snapshot of the data.
    Therefore for some scenes there is a bad fit between ego poses and maps.
    :param nusc: The NuScenes instance to load the ego poses from.
    :param scene_tokens: Optional list of scene tokens corresponding to the current map location.
    :param verbose: Whether to show status messages and progress bar.
    :param out_path: Optional path to save the rendered figure to disk.
    :param render_egoposes: Whether to render ego poses.
    :param render_egoposes_range: Whether to render a rectangle around all ego poses.
    :param render_legend: Whether to render the legend of map layers.
    :return: <np.float32: n, 2>. Returns a matrix with n ego poses in global map coordinates.
    """
    # Settings
    patch_margin = 2
    min_diff_patch = 30
    # Ids of scenes with a bad match between localization and map.
    scene_blacklist = [499, 515, 517]
    # Get logs by location.
    log_location = self.map_api.map_name
    log_tokens = [l['token'] for l in nusc.log if l['location'] == log_location]
    assert len(log_tokens) > 0, 'Error: This split has 0 scenes for location %s!' % log_location
    # Filter scenes.
    scene_tokens_location = [e['token'] for e in nusc.scene if e['log_token'] in log_tokens]
    if scene_tokens is not None:
        scene_tokens_location = [t for t in scene_tokens_location if t in scene_tokens]
    assert len(scene_tokens_location) > 0, 'Error: Found 0 valid scenes for location %s!' % log_location
    map_poses = []
    if verbose:
        print('Adding ego poses to map...')
    for scene_token in tqdm(scene_tokens_location, disable=not verbose):
        # Check that the scene is from the correct location.
        scene_record = nusc.get('scene', scene_token)
        scene_name = scene_record['name']
        scene_id = int(scene_name.replace('scene-', ''))
        log_record = nusc.get('log', scene_record['log_token'])
        assert log_record['location'] == log_location, \
            'Error: The provided scene_tokens do not correspond to the provided map location!'
        # Print a warning if the localization is known to be bad.
        if verbose and scene_id in scene_blacklist:
            print('Warning: %s is known to have a bad fit between ego pose and map.' % scene_name)
        # For each sample in the scene, store the ego pose.
        sample_tokens = nusc.field2token('sample', 'scene_token', scene_token)
        for sample_token in sample_tokens:
            sample_record = nusc.get('sample', sample_token)
            # Poses are associated with the sample_data. Here we use the lidar sample_data.
            sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
            pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token'])
            # Calculate the pose on the map and append.
            map_poses.append(pose_record['translation'])
    # Check that ego poses aren't empty.
    assert len(map_poses) > 0, 'Error: Found 0 ego poses. Please check the inputs.'
    # Compute number of close ego poses.
    if verbose:
        print('Creating plot...')
    # Keep only x/y: the map rendering is 2d.
    map_poses = np.vstack(map_poses)[:, :2]
    # Render the map patch with the current ego poses.
    min_patch = np.floor(map_poses.min(axis=0) - patch_margin)
    max_patch = np.ceil(map_poses.max(axis=0) + patch_margin)
    diff_patch = max_patch - min_patch
    # Grow tiny patches to the minimum size, keeping the center fixed.
    if any(diff_patch < min_diff_patch):
        center_patch = (min_patch + max_patch) / 2
        diff_patch = np.maximum(diff_patch, min_diff_patch)
        min_patch = center_patch - diff_patch / 2
        max_patch = center_patch + diff_patch / 2
    my_patch = (min_patch[0], min_patch[1], max_patch[0], max_patch[1])
    fig, ax = self.render_map_patch(my_patch, self.map_api.non_geometric_layers, figsize=(10, 10),
                                    render_egoposes_range=render_egoposes_range,
                                    render_legend=render_legend)
    # Plot in the same axis as the map.
    # Make sure these are plotted "on top".
    if render_egoposes:
        ax.scatter(map_poses[:, 0], map_poses[:, 1], s=20, c='k', alpha=1.0, zorder=2)
    plt.axis('off')
    if out_path is not None:
        plt.savefig(out_path, bbox_inches='tight', pad_inches=0)
    return map_poses
def render_next_roads(self,
                      x: float,
                      y: float,
                      alpha: float = 0.5,
                      figsize: Tuple[int, int] = (15, 15)) -> None:
    """
    Render the roads reachable from a point of interest.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :param alpha: Opacity of each rendered layer.
    :param figsize: Size of the whole figure.
    """
    next_roads = self.map_api.get_next_roads(x, y)
    # Keep only layers that actually have candidate roads.
    layer_names = [name for name, road_tokens in next_roads.items() if len(road_tokens) > 0]
    tokens = [token for name in layer_names for token in next_roads[name]]
    # Render the candidates, then mark the query location with an x.
    fig, ax = self.render_layers(layer_names, alpha, figsize, tokens)
    ax.plot(x, y, 'x', markersize=12, color='red')
@staticmethod
def _clip_points_behind_camera(points, near_plane: float):
    """
    Perform clipping on polygons that are partially behind the camera.
    This method is necessary as the projection does not work for points behind the camera.
    Hence we compute the line between the point and the camera and follow that line until we hit the near plane of
    the camera. Then we use that point.
    :param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
    :param near_plane: If we set the near_plane distance of the camera to 0 then some points will project to
        infinity. Therefore we need to clip these points at the near plane.
    :return: The clipped version of the polygon. This may have fewer points than the original polygon if some lines
        were entirely behind the camera's near plane.
    """
    points_clipped = []
    # Loop through each line on the polygon.
    # For each line where exactly 1 endpoint is behind the camera, move the point along the line until
    # it hits the near plane of the camera (clipping).
    assert points.shape[0] == 3
    point_count = points.shape[1]
    for line_1 in range(point_count):
        # line_2 wraps around so the last vertex connects back to the first (closed polygon).
        line_2 = (line_1 + 1) % point_count
        point_1 = points[:, line_1]
        point_2 = points[:, line_2]
        # Depth (z in camera frame) decides which side of the near plane each endpoint is on.
        z_1 = point_1[2]
        z_2 = point_2[2]
        if z_1 >= near_plane and z_2 >= near_plane:
            # Both points are in front.
            # Add both points unless the first is already added
            # (element-wise != with all() treats a repeat of the previous vertex as a duplicate).
            if len(points_clipped) == 0 or all(points_clipped[-1] != point_1):
                points_clipped.append(point_1)
            points_clipped.append(point_2)
        elif z_1 < near_plane and z_2 < near_plane:
            # Both points are behind the near plane.
            # Don't add anything.
            continue
        else:
            # One point is in front, one behind.
            # By convention point_a is behind the camera and point_b in front.
            if z_1 <= z_2:
                point_a = points[:, line_1]
                point_b = points[:, line_2]
            else:
                point_a = points[:, line_2]
                point_b = points[:, line_1]
            z_a = point_a[2]
            z_b = point_b[2]
            # Clip line along near plane: alpha is the interpolation factor from point_b towards point_a
            # such that the clipped point lies exactly on the near plane.
            pointdiff = point_b - point_a
            alpha = (near_plane - z_b) / (z_a - z_b)
            clipped = point_a + (1 - alpha) * pointdiff
            assert np.abs(clipped[2] - near_plane) < 1e-6
            # Add the first point (if valid and not duplicate), the clipped point and the second point (if valid).
            if z_1 >= near_plane and (len(points_clipped) == 0 or all(points_clipped[-1] != point_1)):
                points_clipped.append(point_1)
            points_clipped.append(clipped)
            if z_2 >= near_plane:
                points_clipped.append(point_2)
    # Return as a <3, m> matrix, matching the input layout.
    points_clipped = np.array(points_clipped).transpose()
    return points_clipped
def get_records_in_patch(self,
                         box_coords: Tuple[float, float, float, float],
                         layer_names: List[str] = None,
                         mode: str = 'intersect') -> Dict[str, List[str]]:
    """
    Get all the record tokens that intersect or are within a particular rectangular patch.
    :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
    :param layer_names: Names of the layers that we want to retrieve in a particular patch.
        By default will always look for all non geometric layers.
    :param mode: "intersect" will return all non geometric records that intersects the patch,
        "within" will return all non geometric records that are within the patch.
    :return: Dictionary of layer_name - tokens pairs.
    """
    if mode not in ['intersect', 'within']:
        raise ValueError("Mode {} is not valid, choice=('intersect', 'within')".format(mode))
    if layer_names is None:
        layer_names = self.map_api.non_geometric_layers
    records_in_patch = dict()
    for layer_name in layer_names:
        # Keep only the tokens of this layer that pass the patch test.
        records_in_patch[layer_name] = [
            record['token']
            for record in getattr(self.map_api, layer_name)
            if self.is_record_in_patch(layer_name, record['token'], box_coords, mode)
        ]
    return records_in_patch
def is_record_in_patch(self,
                       layer_name: str,
                       token: str,
                       box_coords: Tuple[float, float, float, float],
                       mode: str = 'intersect') -> bool:
    """
    Query whether a particular record is in a rectangular patch.
    :param layer_name: The layer name of the record.
    :param token: The record token.
    :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
    :param mode: "intersect" means it will return True if the geometric object intersects the patch and False
        otherwise, "within" will return True if the geometric object is within the patch and False otherwise.
    :return: Boolean value on whether a particular record intersects or is within a particular patch.
    """
    if mode not in ['intersect', 'within']:
        raise ValueError("Mode {} is not valid, choice=('intersect', 'within')".format(mode))
    # Dispatch on the geometric representation of the layer.
    if layer_name in self.map_api.non_geometric_polygon_layers:
        return self._is_polygon_record_in_patch(token, layer_name, box_coords, mode)
    if layer_name in self.map_api.non_geometric_line_layers:
        return self._is_line_record_in_patch(token, layer_name, box_coords, mode)
    raise ValueError("{} is not a valid layer".format(layer_name))
def layers_on_point(self, x: float, y: float) -> Dict[str, str]:
    """
    Return, for every polygonal layer, the record of that layer the given point is on.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :return: Mapping from each polygonal layer name to the matching record token ('' if none).
    """
    return {layer_name: self.record_on_point(x, y, layer_name)
            for layer_name in self.map_api.non_geometric_polygon_layers}
def record_on_point(self, x, y, layer_name) -> str:
    """
    Query what record of a layer a particular point is on.
    :param x: x coordinate of the point of interest.
    :param y: y coordinate of the point of interest.
    :param layer_name: The non geometric polygonal layer name that we are interested in.
    :return: The first token of a layer a particular point is on or '' if no layer is found.
    """
    if layer_name not in self.map_api.non_geometric_polygon_layers:
        raise ValueError("{} is not a polygon layer".format(layer_name))
    point = Point(x, y)
    records = getattr(self.map_api, layer_name)
    if layer_name == 'drivable_area':
        # A drivable_area record aggregates several polygons; match any of them.
        for record in records:
            for polygon_token in record['polygon_tokens']:
                polygon = self.map_api.extract_polygon(polygon_token)
                # Use within() so points in the polygon's interior match.
                # The previous touches() predicate is True only for points that lie
                # exactly on the boundary, so interior points incorrectly returned ''.
                if point.within(polygon):
                    return record['token']
    else:
        for record in records:
            polygon = self.map_api.extract_polygon(record['polygon_token'])
            if point.within(polygon):
                return record['token']
    # If nothing is found, return an empty string.
    return ''
def extract_polygon(self, polygon_token: str) -> Polygon:
    """
    Construct a shapely Polygon object out of a polygon token.
    :param polygon_token: The token of the polygon record.
    :return: The polygon wrapped in a shapely Polygon object.
    """
    polygon_record = self.map_api.get('polygon', polygon_token)

    def _node_xy(node_token):
        # Look up a node record and return its (x, y) coordinates.
        node = self.map_api.get('node', node_token)
        return node['x'], node['y']

    exterior_coords = [_node_xy(token) for token in polygon_record['exterior_node_tokens']]
    interiors = []
    for hole in polygon_record['holes']:
        interior_coords = [_node_xy(token) for token in hole['node_tokens']]
        if interior_coords:  # Add only non-empty holes.
            interiors.append(interior_coords)
    return Polygon(exterior_coords, interiors)
def extract_line(self, line_token: str) -> LineString:
    """
    Construct a shapely LineString object out of a line token.
    :param line_token: The token of the line record.
    :return: The line wrapped in a LineString object.
    """
    line_record = self.map_api.get('line', line_token)
    coords = []
    for token in line_record['node_tokens']:
        node = self.map_api.get('node', token)
        coords.append((node['x'], node['y']))
    return LineString(coords)
def get_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]:
    """
    Get the bounds of the geometric object that corresponds to a non geometric record.
    :param layer_name: Name of the layer that we are interested in.
    :param token: Token of the record.
    :return: min_x, min_y, max_x, max_y of the geometric representation.
    """
    # Dispatch to the polygon or line implementation depending on the layer kind.
    if layer_name in self.map_api.non_geometric_polygon_layers:
        return self._get_polygon_bounds(layer_name, token)
    if layer_name in self.map_api.non_geometric_line_layers:
        return self._get_line_bounds(layer_name, token)
    raise ValueError("{} is not a valid layer".format(layer_name))
def _get_polygon_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]:
    """
    Get the extremities of the polygon object that corresponds to a non geometric record.
    :param layer_name: Name of the layer that we are interested in.
    :param token: Token of the record.
    :return: min_x, min_y, max_x, max_y of the polygon or polygons (for drivable_area) representation.
    """
    if layer_name not in self.map_api.non_geometric_polygon_layers:
        raise ValueError("{} is not a record with polygon representation".format(token))
    record = self.map_api.get(layer_name, token)
    if layer_name == 'drivable_area':
        # Collect the exterior nodes of every polygon belonging to this record.
        coords = []
        for polygon_token in record['polygon_tokens']:
            polygon = self.map_api.get('polygon', polygon_token)
            nodes = [self.map_api.get('node', node_token) for node_token in polygon['exterior_node_tokens']]
            coords.extend((node['x'], node['y']) for node in nodes)
        exterior_node_coords = np.array(coords)
    else:
        exterior_nodes = [self.map_api.get('node', token) for token in record['exterior_node_tokens']]
        exterior_node_coords = np.array([(node['x'], node['y']) for node in exterior_nodes])
    xs = exterior_node_coords[:, 0]
    ys = exterior_node_coords[:, 1]
    return xs.min(), ys.min(), xs.max(), ys.max()
def _get_line_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]:
    """
    Get the bounds of the line object that corresponds to a non geometric record.
    :param layer_name: Name of the layer that we are interested in.
    :param token: Token of the record.
    :return: min_x, min_y, max_x, max_y of the line representation.
    """
    if layer_name not in self.map_api.non_geometric_line_layers:
        raise ValueError("{} is not a record with line representation".format(token))
    record = self.map_api.get(layer_name, token)
    # Resolve every node token to its (x, y) coordinates.
    nodes = [self.map_api.get('node', node_token) for node_token in record['node_tokens']]
    node_coords = np.array([(node['x'], node['y']) for node in nodes])
    xs = node_coords[:, 0]
    ys = node_coords[:, 1]
    return xs.min(), ys.min(), xs.max(), ys.max()
def _is_polygon_record_in_patch(self,
                                token: str,
                                layer_name: str,
                                box_coords: Tuple[float, float, float, float],
                                mode: str = 'intersect') -> bool:
    """
    Query whether a particular polygon record is in a rectangular patch.
    :param layer_name: The layer name of the record.
    :param token: The record token.
    :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
    :param mode: "intersect" means it will return True if the geometric object intersects the patch and False
        otherwise, "within" will return True if the geometric object is within the patch and False otherwise.
    :return: Boolean value on whether a particular polygon record intersects or is within a particular patch.
    """
    if layer_name not in self.map_api.non_geometric_polygon_layers:
        raise ValueError('{} is not a polygonal layer'.format(layer_name))
    record = self.map_api.get(layer_name, token)
    rectangular_patch = box(*box_coords)
    # drivable_area records aggregate several polygons; test them as one geometry.
    if layer_name == 'drivable_area':
        geom = MultiPolygon([self.map_api.extract_polygon(polygon_token)
                             for polygon_token in record['polygon_tokens']])
    else:
        geom = self.map_api.extract_polygon(record['polygon_token'])
    if mode == 'within':
        return geom.within(rectangular_patch)
    elif mode == 'intersect':
        return geom.intersects(rectangular_patch)
def _is_line_record_in_patch(self,
                             token: str,
                             layer_name: str,
                             box_coords: Tuple[float, float, float, float],
                             mode: str = 'intersect') -> bool:
    """
    Query whether a particular line record is in a rectangular patch.
    :param layer_name: The layer name of the record.
    :param token: The record token.
    :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
    :param mode: "intersect" means it will return True if the geometric object intersects the patch and False
        otherwise, "within" will return True if the geometric object is within the patch and False otherwise.
    :return: Boolean value on whether a particular line record intersects or is within a particular patch.
    """
    if layer_name not in self.map_api.non_geometric_line_layers:
        raise ValueError("{} is not a line layer".format(layer_name))
    # Retrieve the coordinates of this line's nodes.
    record = self.map_api.get(layer_name, token)
    node_recs = [self.map_api.get('node', node_token) for node_token in record['node_tokens']]
    node_coords = np.array([[node['x'], node['y']] for node in node_recs])
    # A few lines in Queenstown have zero nodes. In this case we return False.
    if len(node_coords) == 0:
        return False
    # Per-node test: strictly inside the patch on both axes.
    x_min, y_min, x_max, y_max = box_coords
    inside = ((node_coords[:, 0] > x_min) & (node_coords[:, 0] < x_max) &
              (node_coords[:, 1] > y_min) & (node_coords[:, 1] < y_max))
    if mode == 'intersect':
        return np.any(inside)
    elif mode == 'within':
        return np.all(inside)
def _render_layer(self, ax: Axes, layer_name: str, alpha: float, tokens: List[str] = None) -> None:
    """
    Wrapper method that renders individual layers on an axis.
    :param ax: The matplotlib axes where the layer will get rendered.
    :param layer_name: Name of the layer that we are interested in.
    :param alpha: The opacity of the layer to be rendered.
    :param tokens: Optional list of tokens to render. None means all tokens are rendered.
    """
    # Route to the polygon or line renderer based on the layer kind.
    if layer_name in self.map_api.non_geometric_polygon_layers:
        self._render_polygon_layer(ax, layer_name, alpha, tokens)
    elif layer_name in self.map_api.non_geometric_line_layers:
        self._render_line_layer(ax, layer_name, alpha, tokens)
    else:
        raise ValueError("{} is not a valid layer".format(layer_name))
def _render_polygon_layer(self, ax: Axes, layer_name: str, alpha: float, tokens: List[str] = None) -> None:
    """
    Renders an individual non-geometric polygon layer on an axis.
    :param ax: The matplotlib axes where the layer will get rendered.
    :param layer_name: Name of the layer that we are interested in.
    :param alpha: The opacity of the layer to be rendered.
    :param tokens: Optional list of tokens to render. None means all tokens are rendered.
    """
    if layer_name not in self.map_api.non_geometric_polygon_layers:
        raise ValueError('{} is not a polygonal layer'.format(layer_name))
    records = getattr(self.map_api, layer_name)
    if tokens is not None:
        records = [r for r in records if r['token'] in tokens]

    # Only the first drawn patch carries the legend label; subsequent ones get None.
    label = layer_name

    def _draw(polygon):
        nonlocal label
        ax.add_patch(descartes.PolygonPatch(polygon, fc=self.color_map[layer_name], alpha=alpha,
                                            label=label))
        label = None

    if layer_name == 'drivable_area':
        # drivable_area records aggregate several polygons each.
        for record in records:
            for polygon_token in record['polygon_tokens']:
                _draw(self.map_api.extract_polygon(polygon_token))
    else:
        for record in records:
            _draw(self.map_api.extract_polygon(record['polygon_token']))
def _render_line_layer(self, ax: Axes, layer_name: str, alpha: float, tokens: List[str] = None) -> None:
    """
    Renders an individual non-geometric line layer on an axis.
    :param ax: The matplotlib axes where the layer will get rendered.
    :param layer_name: Name of the layer that we are interested in.
    :param alpha: The opacity of the layer to be rendered.
    :param tokens: Optional list of tokens to render. None means all tokens are rendered.
    """
    if layer_name not in self.map_api.non_geometric_line_layers:
        raise ValueError("{} is not a line layer".format(layer_name))
    records = getattr(self.map_api, layer_name)
    if tokens is not None:
        records = [r for r in records if r['token'] in tokens]
    for index, record in enumerate(records):
        # Only the very first record carries the legend label — even if it turns out
        # to be empty and is skipped below (matches historical behavior).
        label = layer_name if index == 0 else None
        line = self.map_api.extract_line(record['line_token'])
        if line.is_empty:  # Skip lines without nodes.
            continue
        xs, ys = line.xy
        if layer_name == 'traffic_light':
            # Draws an arrow with the physical traffic light as the starting point, pointing to the direction on
            # where the traffic light points.
            ax.add_patch(Arrow(xs[0], ys[0], xs[1] - xs[0], ys[1] - ys[0], color=self.color_map[layer_name],
                               label=label))
        else:
            ax.plot(xs, ys, color=self.color_map[layer_name], alpha=alpha, label=label)
def _get_layer_geom(self,
                    patch_box: Tuple[float, float, float, float],
                    patch_angle: float,
                    layer_name: str) -> List[Geometry]:
    """
    Wrapper method that gets the geometries for each layer.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees.
    :param layer_name: Name of map layer to be converted to binary map mask patch.
    :return: List of geometries for the given layer.
    """
    # Dispatch on the geometric representation of the layer.
    if layer_name in self.map_api.non_geometric_polygon_layers:
        return self._get_layer_polygon(patch_box, patch_angle, layer_name)
    if layer_name in self.map_api.non_geometric_line_layers:
        return self._get_layer_line(patch_box, patch_angle, layer_name)
    raise ValueError("{} is not a valid layer".format(layer_name))
def _layer_geom_to_mask(self,
                        layer_name: str,
                        layer_geom: List[Geometry],
                        local_box: Tuple[float, float, float, float],
                        canvas_size: Tuple[int, int]) -> np.ndarray:
    """
    Wrapper method that gets the mask for each layer's geometries.
    :param layer_name: The name of the layer for which we get the masks.
    :param layer_geom: List of the geometries of the layer specified in layer_name.
    :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically
        x_center = y_center = 0.
    :param canvas_size: Size of the output mask (h, w).
    :return: Binary mask for the layer's geometries.
    """
    # Dispatch on the geometric representation of the layer.
    if layer_name in self.map_api.non_geometric_polygon_layers:
        return self._polygon_geom_to_mask(layer_geom, local_box, layer_name, canvas_size)
    if layer_name in self.map_api.non_geometric_line_layers:
        return self._line_geom_to_mask(layer_geom, local_box, layer_name, canvas_size)
    raise ValueError("{} is not a valid layer".format(layer_name))
@staticmethod
def mask_for_polygons(polygons: MultiPolygon, mask: np.ndarray) -> np.ndarray:
    """
    Convert a polygon or multipolygon list to an image mask ndarray.
    :param polygons: List of Shapely polygons to be converted to numpy array.
    :param mask: Canvas where mask will be generated.
    :return: Numpy ndarray polygon mask.
    """
    if not polygons:
        return mask

    def _int_coords(coords):
        # Round coordinates and cast to the int32 type expected by OpenCV.
        return np.array(coords).round().astype(np.int32)

    exteriors = [_int_coords(poly.exterior.coords) for poly in polygons]
    interiors = [_int_coords(ring.coords) for poly in polygons for ring in poly.interiors]
    cv2.fillPoly(mask, exteriors, 1)  # Fill the polygon bodies...
    cv2.fillPoly(mask, interiors, 0)  # ...then carve out the holes.
    return mask
@staticmethod
def mask_for_lines(lines: LineString, mask: np.ndarray) -> np.ndarray:
    """
    Rasterize a Shapely line geometry into an image mask ndarray.
    :param lines: Shapely LineString or MultiLineString to be converted to a numpy array.
    :param mask: Canvas where mask will be generated.
    :return: Numpy ndarray line mask.
    """
    def _draw(line) -> None:
        # Draw a single LineString onto the mask as a 2px-wide polyline of value 1.
        coords = np.asarray(list(line.coords), np.int32)
        coords = coords.reshape((-1, 2))
        cv2.polylines(mask, [coords], False, 1, 2)

    if lines.geom_type == 'MultiLineString':
        # Iterate via .geoms: direct iteration over multi-part geometries is
        # deprecated in Shapely 1.8 and removed in 2.0; .geoms works on both.
        for line in lines.geoms:
            _draw(line)
    else:
        _draw(lines)
    return mask
def _polygon_geom_to_mask(self,
                          layer_geom: List[Polygon],
                          local_box: Tuple[float, float, float, float],
                          layer_name: str,
                          canvas_size: Tuple[int, int]) -> np.ndarray:
    """
    Convert polygons inside the patch to a binary mask and return the map patch.
    :param layer_geom: List of polygons for each map layer.
    :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically
        x_center = y_center = 0.
    :param layer_name: Name of map layer to be converted to binary map mask patch.
    :param canvas_size: Size of the output mask (h, w).
    :return: Binary map mask patch with the size canvas_size.
    """
    if layer_name not in self.map_api.non_geometric_polygon_layers:
        raise ValueError('{} is not a polygonal layer'.format(layer_name))
    patch_x, patch_y, patch_h, patch_w = local_box
    patch = self.get_patch_coord(local_box)
    canvas_h = canvas_size[0]
    canvas_w = canvas_size[1]
    # Scale factors mapping patch coordinates to canvas pixels.
    scale_height = canvas_h / patch_h
    scale_width = canvas_w / patch_w
    # Translation moving the patch center to the canvas origin.
    trans_x = -patch_x + patch_w / 2.0
    trans_y = -patch_y + patch_h / 2.0
    map_mask = np.zeros(canvas_size, np.uint8)
    for polygon in layer_geom:
        new_polygon = polygon.intersection(patch)
        if not new_polygon.is_empty:
            new_polygon = affinity.affine_transform(new_polygon,
                                                    [1.0, 0.0, 0.0, 1.0, trans_x, trans_y])
            new_polygon = affinity.scale(new_polygon, xfact=scale_width, yfact=scale_height, origin=(0, 0))
            # Compare with '==', not 'is': identity comparison against a str literal
            # relies on CPython interning and emits a SyntaxWarning on Python >= 3.8.
            if new_polygon.geom_type == 'Polygon':
                new_polygon = MultiPolygon([new_polygon])
            map_mask = self.mask_for_polygons(new_polygon, map_mask)
    return map_mask
def _line_geom_to_mask(self,
                       layer_geom: List[LineString],
                       local_box: Tuple[float, float, float, float],
                       layer_name: str,
                       canvas_size: Tuple[int, int]) -> Optional[np.ndarray]:
    """
    Convert lines inside the patch to a binary mask and return the map patch.
    :param layer_geom: List of LineStrings for each map layer.
    :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically
        x_center = y_center = 0.
    :param layer_name: Name of map layer to be converted to binary map mask patch.
    :param canvas_size: Size of the output mask (h, w).
    :return: Binary map mask patch in a canvas size, or None for the traffic_light layer.
    """
    if layer_name not in self.map_api.non_geometric_line_layers:
        raise ValueError("{} is not a line layer".format(layer_name))
    # Traffic lights are rendered as arrows, not lines; there is no mask for them.
    # Compare with '==', not 'is': identity comparison against a str literal
    # relies on CPython interning and emits a SyntaxWarning on Python >= 3.8.
    if layer_name == 'traffic_light':
        return None
    patch_x, patch_y, patch_h, patch_w = local_box
    patch = self.get_patch_coord(local_box)
    canvas_h = canvas_size[0]
    canvas_w = canvas_size[1]
    # Scale factors mapping patch coordinates to canvas pixels.
    scale_height = canvas_h / patch_h
    scale_width = canvas_w / patch_w
    # Translation moving the patch center to the canvas origin.
    trans_x = -patch_x + patch_w / 2.0
    trans_y = -patch_y + patch_h / 2.0
    map_mask = np.zeros(canvas_size, np.uint8)
    for line in layer_geom:
        new_line = line.intersection(patch)
        if not new_line.is_empty:
            new_line = affinity.affine_transform(new_line,
                                                 [1.0, 0.0, 0.0, 1.0, trans_x, trans_y])
            new_line = affinity.scale(new_line, xfact=scale_width, yfact=scale_height, origin=(0, 0))
            map_mask = self.mask_for_lines(new_line, map_mask)
    return map_mask
def _get_layer_polygon(self,
                       patch_box: Tuple[float, float, float, float],
                       patch_angle: float,
                       layer_name: str) -> List[Polygon]:
    """
    Retrieve the polygons of a particular layer within the specified patch.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees.
    :param layer_name: Name of map layer to be extracted.
    :return: List of Polygon in a patch box.
    """
    if layer_name not in self.map_api.non_geometric_polygon_layers:
        raise ValueError('{} is not a polygonal layer'.format(layer_name))
    patch_x = patch_box[0]
    patch_y = patch_box[1]
    patch = self.get_patch_coord(patch_box, patch_angle)
    records = getattr(self.map_api, layer_name)
    polygon_list = []

    def _clip_and_localize(polygon) -> None:
        # Clip to the patch, rotate into the patch frame and translate so that the
        # patch center becomes the origin; collect only non-empty results.
        new_polygon = polygon.intersection(patch)
        if new_polygon.is_empty:
            return
        new_polygon = affinity.rotate(new_polygon, -patch_angle,
                                      origin=(patch_x, patch_y), use_radians=False)
        new_polygon = affinity.affine_transform(new_polygon,
                                                [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y])
        # Compare with '==', not 'is': identity comparison against a str literal
        # relies on CPython interning and emits a SyntaxWarning on Python >= 3.8.
        if new_polygon.geom_type == 'Polygon':
            new_polygon = MultiPolygon([new_polygon])
        polygon_list.append(new_polygon)

    if layer_name == 'drivable_area':
        for record in records:
            for polygon_token in record['polygon_tokens']:
                _clip_and_localize(self.map_api.extract_polygon(polygon_token))
    else:
        for record in records:
            polygon = self.map_api.extract_polygon(record['polygon_token'])
            # NOTE(review): only this branch skips invalid polygons; the drivable_area
            # branch historically performed no validity check, and that is preserved.
            if polygon.is_valid:
                _clip_and_localize(polygon)
    return polygon_list
def _get_layer_line(self,
                    patch_box: Tuple[float, float, float, float],
                    patch_angle: float,
                    layer_name: str) -> Optional[List[LineString]]:
    """
    Retrieve the lines of a particular layer within the specified patch.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees.
    :param layer_name: Name of map layer to be converted to binary map mask patch.
    :return: List of LineString in a patch box, or None for the traffic_light layer.
    """
    if layer_name not in self.map_api.non_geometric_line_layers:
        raise ValueError("{} is not a line layer".format(layer_name))
    # Compare with '==', not 'is': identity comparison against a str literal
    # relies on CPython interning and emits a SyntaxWarning on Python >= 3.8.
    if layer_name == 'traffic_light':
        return None
    patch_x = patch_box[0]
    patch_y = patch_box[1]
    patch = self.get_patch_coord(patch_box, patch_angle)
    line_list = []
    records = getattr(self.map_api, layer_name)
    for record in records:
        line = self.map_api.extract_line(record['line_token'])
        if line.is_empty:  # Skip lines without nodes.
            continue
        new_line = line.intersection(patch)
        if not new_line.is_empty:
            # Rotate into the patch frame and translate so the patch center is the origin.
            new_line = affinity.rotate(new_line, -patch_angle, origin=(patch_x, patch_y), use_radians=False)
            new_line = affinity.affine_transform(new_line,
                                                 [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y])
            line_list.append(new_line)
    return line_list
@staticmethod
def get_patch_coord(patch_box: Tuple[float, float, float, float],
                    patch_angle: float = 0.0) -> Polygon:
    """
    Convert patch_box to shapely Polygon coordinates.
    :param patch_box: Patch box defined as [x_center, y_center, height, width].
    :param patch_angle: Patch orientation in degrees.
    :return: Box Polygon for patch_box.
    """
    patch_x, patch_y, patch_h, patch_w = patch_box
    half_w = patch_w / 2.0
    half_h = patch_h / 2.0
    # Axis-aligned box around the center, then rotated about that center.
    patch = box(patch_x - half_w, patch_y - half_h, patch_x + half_w, patch_y + half_h)
    return affinity.rotate(patch, patch_angle, origin=(patch_x, patch_y), use_radians=False)
|
from config import *
import numpy as np
class Test_model():
    """
    Container that evaluates a set of predictive models on a test set.

    Parameters
    ----------
    models: list
        List of dicts, each exposing a 'model_class' entry with a ``predict``
        method. :meth:`test_data` adds a 'result' (accuracy) entry to each dict.
    image:
        Preprocessed test images passed to each model's ``predict``.
    label:
        Ground-truth labels as one-hot vectors; assumed shape
        (batch, positions, alphabet) — TODO confirm against caller.
    """

    def __init__(self,
                 models: list = None,
                 image=None,
                 label=None):
        # Use None as default: a mutable default ([]) would be shared by every
        # instance, so appends on one instance would leak into all others.
        self.models = models if models is not None else []
        self.image = image
        self.label = label

    def _tensor_to_chars(self, tensor):
        """Convert a (batch, positions, alphabet) one-hot tensor to a list of strings."""
        label_pred = []
        for sample in tensor:
            # Pick the most probable character index at every position.
            label_pred.append([np.argmax(char_vector) for char_vector in sample])
        # Map index sequences to strings via the ALL_CHARS alphabet (from config).
        return [''.join(ALL_CHARS[i] for i in indexes) for indexes in label_pred]

    def test_data(self):
        """Run every model on the test set; store and return per-model accuracy."""
        for model in self.models:
            predicted_code = np.array(model['model_class'].predict(self.image))
            # predict() presumably returns (positions, batch, alphabet);
            # reorder to (batch, positions, alphabet) — TODO confirm.
            predicted_code = np.transpose(predicted_code, (1, 0, 2))
            label_pred = self._tensor_to_chars(predicted_code)
            labels = self._tensor_to_chars(self.label)
            total = len(labels)
            correct = len([1 for truth, pred in zip(labels, label_pred) if truth == pred])
            model['result'] = correct / total
        return self.models
|
"""
Copyright (c) 2018-2021 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
* The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details
provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
* Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
* This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from .....app import db
from time import strftime
class OemResponse(db.Model):
    """Class to create DB table oem_response."""
    __tablename__ = 'oem_response'

    id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
    oem_imei = db.Column(db.String(50), unique=True)
    oem_tac = db.Column(db.String(8), nullable=False)
    gsma_brand = db.Column(db.String(300))
    oem_other_imeis = db.Column(db.ARRAY(db.String))
    oem_all_imeis = db.Column(db.ARRAY(db.String))
    oem_serial_no = db.Column(db.String(200))
    oem_color = db.Column(db.String(100))
    oem_brand = db.Column(db.String(300))
    oem_model = db.Column(db.String(300))
    oem_rat = db.Column(db.String(100))
    oem_mac = db.Column(db.String(100))
    # Pass a callable so the timestamp is computed at insert time for every row.
    # The previous `default=strftime(...)` evaluated once at import, stamping all
    # rows with the process start time.
    oem_response_date = db.Column(db.DateTime, default=lambda: strftime("%Y-%m-%d %H:%M:%S"))
    oem_id = db.Column(db.String(50), db.ForeignKey('oem_logins.oem_id'))

    def __repr__(self):  # pragma: no cover
        return "<OemResponse({},{}, {}, {}, {}, {}, {}, {}, {})>".format(self.id, self.oem_imei, self.oem_tac,
                                                                         self.gsma_brand, self.oem_serial_no,
                                                                         self.oem_color, self.oem_brand, self.oem_model,
                                                                         self.oem_response_date)
|
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/imageworks/OpenShadingLanguage
# Turn an LLVM-compiled bitfile into a C++ source file where the compiled
# bitcode is in a huge array.
from __future__ import print_function, absolute_import
import sys
# Command-line arguments: input bitcode file, output C++ file, symbol prefix.
in_name = sys.argv[1]
out_name = sys.argv[2]
prefix = sys.argv[3]

# Use context managers so both files are closed (and the output flushed) even on error.
with open(in_name, 'rb') as f_in, open(out_name, 'w') as f_out:
    f_out.write('#include <cstddef>\n')
    f_out.write('unsigned char ' + prefix + '_block[] = {\n')
    # Note: the original had a no-op `f_in.read` statement (attribute access without
    # a call) here; it has been removed.
    if sys.version_info > (3, 0):
        # Python 3: iterating bytes yields ints.
        for c in f_in.read():
            f_out.write(hex(c) + ',\n')
    else:
        # Python 2: iterating a str yields 1-char strings; hex-encode each.
        for c in f_in.read():
            f_out.write('0x{},\n'.format(c.encode('hex')))
    # Trailing NUL terminator; _size excludes it.
    f_out.write('0x00 };\n')
    f_out.write('int {}_size = sizeof({}_block)-1;\n'.format(prefix, prefix))
|
"""Base classes and definitions common to all Fields."""
import abc
import enum
import functools
import io
import os
import typing
import warnings
from typing import Any
from typing import BinaryIO
from typing import Callable
from typing import Generic
from typing import Iterable
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Type
from typing import TypeVar
from typing import Union
import more_itertools as m_iter
from binobj import errors
from binobj import helpers
from binobj.typedefs import FieldValidator
from binobj.typedefs import StrDict
if typing.TYPE_CHECKING: # pragma: no cover
from typing import Collection
from binobj.structures import Struct
from binobj.structures import StructMetadata
__all__ = ["DEFAULT", "NOT_PRESENT", "UNDEFINED", "Field"]
# Each sentinel is a one-member enum: enum members are singletons, compare by
# identity, and (unlike a bare `object()`) survive pickling and have a useful repr.
class _Default(enum.Enum):
    token = 0


class _Undefined(enum.Enum):
    token = 0


class _NotPresent(enum.Enum):
    token = 0


UNDEFINED = _Undefined.token
"""A sentinel value used to indicate that a setting or field is undefined."""

DEFAULT = _Default.token
"""A sentinel value used to indicate that the default value of a setting should be used.

We need this because sometimes ``None`` is a valid value for that setting.
"""

NOT_PRESENT = _NotPresent.token
"""A sentinel value used to indicate that a field is not present.

.. versionadded:: 0.4.5
"""

# The Python type a Field serializes to and deserializes from.
T = TypeVar("T")
class Field(Generic[T]):
    r"""The base class for all struct fields.

    :param str name:
        The name of the field.
    :param const:
        A constant value this field is expected to take. It will always have this value
        when dumped, and will fail validation if the field isn't this value when loaded.
        Useful for reserved fields and file tags.

        This argument *must* be of the same type as the field, i.e. it must be a string
        for a :class:`~binobj.fields.stringlike.String`, an integer for an
        :class:`~binobj.fields.numeric.Integer`, and so on.
    :param default:
        The default value to use if a value for this field isn't passed to the struct
        for serialization, or a callable taking no arguments that will return a default
        value.

        This argument (or the return value of the callable) *must* be of the same type
        as the field, i.e. it must be a string for a
        :class:`~binobj.fields.stringlike.String`, an integer for an
        :class:`~binobj.fields.numeric.Integer`, and so on.
    :param bool discard:
        When deserializing, don't include this field in the returned results. This means
        that you won't be able to use the value for anything later. For example, if you
        need to reference it in a ``present`` function like so::

            name_size = fields.UInt16(discard=True)
            filename = fields.StringZ(encoding="utf-8")
            _filename_padding = fields.Bytes(
                const=b"\0", discard=True, present=lambda f, *_: f["name_size"] % 2
            )

        this will crash with a :class:`KeyError` because ``name_size`` was discarded.
    :param null_value:
        Either a byte string or a value to use to represent ``None`` in serialized data.
        When loading, the returned value will be ``None`` if this exact sequence of
        bytes is encountered. If not given, the field is considered "not nullable" and
        any attempt to assign ``None`` to it will result in a crash upon serialization.
    :param size:
        Optional. The size of the field. This can be a number of things:

        * An integer constant. The field will always be the same size, no matter what
          value is given to it.
        * Another :class:`.Field` object. That field gives the size of this field, in
          bytes.
        * A string naming another field. It's equivalent to passing in a :class:`.Field`
          instance, except used for references in the same class or a field defined in
          the superclass.
    :param validate:
        A callable or list of callables that validates a given value for this field. The
        callable(s) will always be passed the deserialized value, so a validator for an
        :class:`~binobj.fields.numeric.Integer` field will always be passed an integer,
        a :class:`~binobj.fields.stringlike.String` validator will always be passed a
        string, and so on.
    :param callable present:
        Optional. A callable that, when called with the struct as its argument,
        returns a boolean indicating if this field is "present" and should be loaded or
        dumped. For example, if we have a ``flags`` field that's a bitmap indicating
        what fields come next, we could have something like this::

            flags = fields.UInt16()
            foo = fields.StringZ(present=lambda v, *_: v["flags"] & 0x8000)
            bar = fields.StringZ(present=lambda v, *_: v["flags"] & 0x4000)

        Thus, if and only if ``flags`` has bit 15 set, ``foo`` will be read from the
        stream next. If ``flags`` has bit 15 clear, ``foo`` will be assigned the field's
        :attr:`not_present_value` (defaults to :data:`NOT_PRESENT`).

        The callable takes three positional arguments:

        - A dict of the fields that have already been loaded or are about to be dumped.
        - The ``context`` object passed to :meth:`from_stream` or :meth:`to_stream`.
        - When loading, the stream being loaded from. The stream pointer MUST be reset
          to its original position before the function returns.
    :param not_present_value:
        A custom value to return if a field is missing when loading (see the ``present``
        argument). It can be ``None`` or match the datatype of the field, i.e. a string
        for a :class:`~binobj.fields.stringlike.String`, an integer for an
        :class:`~binobj.fields.numeric.Integer`, and so on. If not given, defaults to
        :data:`NOT_PRESENT`.

    .. attribute:: index

        The zero-based index of the field in the struct.

        :type: int

    .. attribute:: offset

        The zero-based byte offset of the field in the struct. If the offset can't be
        computed (e.g. it's preceded by a variable-length field), this will be ``None``.

        :type: int

    .. versionadded:: 0.4.5
        The ``present`` argument.

    .. versionchanged:: 0.8.0
        This now inherits from :class:`typing.Generic`.

    .. versionadded:: 0.9.0
        * The ``not_present_value`` argument.
        * ``size`` has full support for :class:`Field`\s and field name values. This
          used to be only supported by some fields, with others left out by accident.

    .. versionchanged:: 0.9.0
        ``null_value`` can now also be a deserialized value. For example, you could pass
        r"\\N" for a string or 0 for an integer.

    .. deprecated:: 0.9.0
        Passing :data:`DEFAULT` to ``null_value`` for unsized fields such as
        :class:`~binobj.fields.stringlike.StringZ` is deprecated and will trigger an
        error in the future. This resolves the asymmetric behavior where using
        :data:`DEFAULT` throws an error when dumping but happily loads whatever's next
        in the stream when loading.
    """

    __overrideable_attributes__ = ()  # type: Union[Collection[str], Mapping[str, str]]

    # TODO (dargueta): Define __explicit_init_args__ attribute once we drop 3.5 support

    def __new__(cls: Type["Field[Any]"], *_args, **kwargs: Any) -> "Field[Any]":
        """Create a new instance, recording which keyword arguments were passed in.

        Recording the explicit arguments is necessary so that a field can use the
        fallback values its container class gives for anything else.
        """
        instance = super().__new__(cls)
        instance.__explicit_init_args__ = frozenset(kwargs.keys())
        return instance

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        const: Union[T, _Undefined] = UNDEFINED,
        default: Union[Optional[T], Callable[[], Optional[T]], _Undefined] = UNDEFINED,
        discard: bool = False,
        null_value: Union[bytes, _Default, _Undefined, T] = UNDEFINED,
        size: Union[int, str, "Field[int]", None] = None,
        validate: Union[FieldValidator, Iterable[FieldValidator]] = (),
        present: Callable[[StrDict, Any, Optional[BinaryIO]], bool] = (lambda *_: True),
        not_present_value: Union[T, None, _NotPresent] = NOT_PRESENT
    ):
        self.const = const
        self.discard = discard
        self.null_value = null_value
        self.present = present
        self.not_present_value = not_present_value
        # Bind each validator to this field so it can be called with just the value.
        self.validators = [
            functools.partial(v, self) for v in m_iter.always_iterable(validate)
        ]

        if default is UNDEFINED and const is not UNDEFINED:
            # If no default is given but ``const`` is, set the default value to
            # ``const``.
            self._default = (
                const
            )  # type: Union[Optional[T], Callable[[], Optional[T]], _Undefined]
        else:
            self._default = default

        # These attributes are typically set by the struct containing the field after
        # the field's instantiated.
        self.name = typing.cast(str, name)
        self.index = typing.cast(int, None)
        self.offset = None  # type: Optional[int]
        self._compute_fn = (
            None
        )  # type: Optional[Callable[["Field[T]", StrDict], Optional[T]]]

        if size is not None or const is UNDEFINED:
            self._size = size
        else:
            # No explicit size but a const was given -- derive the size from it.
            self._size = self._size_for_value(const)

    @property
    def size(self) -> Union[int, str, "Field[int]", None]:
        """The size of this field, in bytes.

        If the field is of variable size, such as a null-terminated string, this will be
        ``None``. Builtin fields set this automatically if ``const`` is given but you'll
        need to implement :meth:`_size_for_value` in custom fields.
        """  # noqa: D400,D401
        # TODO (dargueta) This return value is horrific. Rework it if possible.
        return self._size

    @property
    def has_fixed_size(self) -> bool:
        """Does this field have a fixed size?

        .. versionadded:: 0.9.0
        """  # noqa: D400,D401
        return isinstance(self.size, int)

    def bind_to_container(
        self,
        struct_info: "StructMetadata",
        name: str,
        index: int,
        offset: Optional[int] = None,
    ) -> None:
        """Bind this field to a Struct and apply any predefined defaults.

        :param binobj.structures.StructMetadata struct_info:
            The metadata object describing the Struct this field will be bound into.
        :param str name:
            The name of this field.
        :param int index:
            The index of this field in the container.
        :param int offset:
            The byte offset of this field in the container, or ``None`` if unknown. This
            is usually equal to the sum of the sizes of the fields preceding this one in
            the container.

        .. versionchanged:: 0.10.0
            Added the ``struct_info`` parameter.
        """
        self.name = name
        self.index = index
        self.offset = offset

        # Normalize __overrideable_attributes__ to a mapping of argument name ->
        # attribute name; a plain collection means the two names are the same.
        if not isinstance(self.__overrideable_attributes__, Mapping):
            overrideables = typing.cast(
                Mapping[str, Any], {n: n for n in self.__overrideable_attributes__}
            )
        else:
            overrideables = self.__overrideable_attributes__

        for argument_name, attribute_name in overrideables.items():
            # Arguments explicitly passed to __init__ always win over class defaults.
            # TODO (dargueta): Remove this directive once we drop 3.5 support
            if argument_name in self.__explicit_init_args__:  # type: ignore
                continue

            # A default qualified with this field's class name takes precedence over
            # an unqualified one.
            typed_default_name = type(self).__name__ + "__" + argument_name
            if typed_default_name in struct_info.argument_defaults:
                setattr(
                    self,
                    attribute_name,
                    struct_info.argument_defaults[typed_default_name],
                )
            elif argument_name in struct_info.argument_defaults:
                setattr(
                    self, attribute_name, struct_info.argument_defaults[argument_name]
                )
            # Else: struct doesn't define a default value

    def compute_value_for_dump(
        self, all_values: StrDict
    ) -> Union[Optional[T], _NotPresent]:
        """Calculate the value for this field upon dumping.

        :param dict all_values:
            The dictionary of all the field data that's about to be dumped.

        :return:
            The value the dumper will use for this field, or :data:`NOT_PRESENT` if the
            field shouldn't be serialized. It *will not* return
            :attr:`.not_present_value` in this case, as the field should not be dumped
            at all.

        :raise MissingRequiredValueError:
            No value could be derived for this field. It's missing in the input data,
            there's no default defined, and it doesn't have a compute function defined
            either.

        .. versionadded:: 0.3.1

        .. versionchanged:: 0.8.0
            If ``default`` is given by a callable and that callable returns
            :data:`UNDEFINED`, it will throw :class:`~.errors.MissingRequiredValueError`
            instead of returning :data:`UNDEFINED`.
        """
        # FIXME (dargueta): Don't pass None for the context variable.
        if not self.present(all_values, None, None):
            return NOT_PRESENT
        if self.name in all_values:
            return typing.cast(Optional[T], all_values[self.name])
        if self._default is not UNDEFINED:
            # Theoretically if self._default is a callable that returns UNDEFINED we
            # could run into trouble here. Get the return value.
            to_return = self.default
            if to_return is not UNDEFINED:
                return to_return
        if self._compute_fn is not None:
            return self._compute_fn(self, all_values)
        raise errors.MissingRequiredValueError(field=self)

    def computes(self, method: Callable[["Field[T]", StrDict], Optional[T]]) -> None:
        """Decorator that marks a function as computing the value for a field.

        .. deprecated:: 0.6.0
            This decorator will be moved to :mod:`binobj.decorators`.

        You can use this for automatically assigning values based on other fields. For
        example, suppose we have this struct::

            class MyStruct(Struct):
                n_numbers = UInt8()
                numbers = Array(UInt8(), count=n_numbers)

        This works great for loading, but when we're dumping we have to pass in a value
        for ``n_numbers`` explicitly. We can use the ``computes`` decorator to relieve
        us of that burden::

            class MyStruct(Struct):
                n_numbers = UInt8()
                numbers = Array(UInt8(), count=n_numbers)

                @n_numbers.computes
                def _assign_n_numbers(self, all_fields):
                    return len(all_fields['numbers'])

        Some usage notes:

        * The computing function will *not* be called if

          * A value is explicitly set for the field by the calling code.
          * The field has a ``default`` or ``const`` value.

        * Computed fields are executed in the order that the fields are dumped, so a
          computed field must *not* rely on the value of another computed field
          occurring after it.

        .. versionadded:: 0.3.0
        """  # noqa: D401
        if self._compute_fn:
            raise errors.ConfigurationError(
                "Can't define two computing functions for field %r." % self, field=self
            )
        if self.const is not UNDEFINED:
            raise errors.ConfigurationError(
                "Cannot set compute function for a const field.", field=self
            )
        warnings.warn(
            "This decorator will be moved to the `decorators` module.",
            PendingDeprecationWarning,
        )
        self._compute_fn = method

    @property
    def allow_null(self) -> bool:
        """Is ``None`` an acceptable value for this field?

        :type: bool
        """  # noqa: D400
        return self.null_value is not UNDEFINED

    @property
    def default(self) -> Union[T, None, _Undefined]:
        """The default value of this field, or :data:`UNDEFINED`.

        If the default value passed to the constructor was a callable, this property
        will always give its return value. That callable is invoked on each access of
        this property.

        .. versionchanged:: 0.6.1
            If no default is defined but ``const`` is, this property returns the value
            for ``const``.
        """  # noqa: D401
        default_value = self._default
        if callable(default_value):
            return default_value()
        if default_value is UNDEFINED and self.const is not UNDEFINED:
            return self.const
        return default_value

    @property
    def required(self) -> bool:
        """Is this field required for serialization?

        :type: bool
        """  # noqa: D400
        return self.const is UNDEFINED and self.default is UNDEFINED

    def _size_for_value(self, value: Optional[T]) -> Optional[int]:
        """Get the size of the serialized value, or ``None`` if it can't be computed.

        This is an ugly hack for computing ``size`` properly when only ``const`` is
        given. It's *HIGHLY DISCOURAGED* to implement this function in your own field
        subclasses, since it *must not* call :meth:`from_stream`, :meth:`from_bytes`,
        :meth:`to_stream`, or :meth:`to_bytes`. Doing so could result in infinite
        recursion.

        :param value:
            The value to serialize.
        :return:
            The size of ``value`` when serialized, in bytes. If the size cannot be
            computed, return ``None``.
        :rtype: int
        """
        if self.has_fixed_size:
            return typing.cast(int, self._size)
        return None

    def _get_expected_size(self, field_values: StrDict) -> int:
        """Compatibility shim -- this function was made public in 0.9.0."""
        warnings.warn(
            "_get_expected_size was made public in 0.9.0. The private form has been"
            " deprecated and will be removed in 1.0.",
            DeprecationWarning,
        )
        return self.get_expected_size(field_values)

    def get_expected_size(self, field_values: StrDict) -> int:
        """Determine the size of this field in bytes, given values for other fields.

        :param dict field_values:
            A dict mapping field names to their resolved values.

        :return:
            The number of bytes this field is expected to occupy.
        :rtype: int

        :raise MissingRequiredValueError:
            The field's size references another field but the other field is missing
            from ``field_values``.
        :raise UndefinedSizeError:
            The field doesn't have a defined size nor refers to another field to
            determine its size.

        .. versionchanged:: 0.9.0
            This used to be a private method. ``_get_expected_size()`` is still present
            for compatibility but it will eventually be removed.
        """
        if self.has_fixed_size:
            return typing.cast(int, self.size)

        if self.size is None:
            # Field has an undefined size. If the caller gave us a value for that field,
            # or if we have a default value defined, we might be able to determine the
            # size of that value.
            if self.name in field_values:
                expected_size = self._size_for_value(field_values[self.name])
            elif self.default is not UNDEFINED:
                expected_size = self._size_for_value(self.default)
            else:
                expected_size = None

            if expected_size is not None:
                return expected_size
            raise errors.UndefinedSizeError(field=self)

        # The size is given indirectly, either as a Field or the name of one.
        if isinstance(self.size, Field):
            name = self.size.name
        elif isinstance(self.size, str):
            name = self.size
        else:
            raise TypeError(
                "Unexpected type for %s.size: %s"
                % (self.name, type(self.size).__name__)
            )

        if name in field_values:
            return field_values[name]
        raise errors.MissingRequiredValueError(field=name)

    def from_stream(  # noqa: C901
        self,
        stream: BinaryIO,
        context: Any = None,
        loaded_fields: Optional[StrDict] = None,
    ) -> Union[Optional[T], _NotPresent]:
        """Load data from the given stream.

        :param BinaryIO stream:
            The stream to load data from.
        :param context:
            Additional data to pass to this method. Subclasses must ignore anything they
            don't recognize.
        :param dict loaded_fields:
            A dictionary of the fields that have already been loaded. This is set
            automatically when a field is loaded by a
            :class:`~binobj.structures.Struct`.

        :return: The deserialized data, or :data:`NOT_PRESENT`
        """
        if loaded_fields is None:
            loaded_fields = {}

        if not self.present(loaded_fields, context, stream):
            return self.not_present_value

        # If the caller passed in `null_value` as a byte string we'll peek ahead in the
        # stream and see if the bytes match up.
        if self.allow_null:
            try:
                null_repr = self._get_null_repr(loaded_fields)
            except errors.UnserializableValueError as err:
                # Null can't be represented in this current state, so we can't check to
                # see if the *raw binary* form is null. This isn't an error UNLESS
                # null_value is `DEFAULT`. If null_value is DEFAULT and we can't
                # determine the size, then we're out of luck.
                if self.null_value is DEFAULT:
                    raise errors.CannotDetermineNullError(field=self) from err.__cause__
            else:
                potential_null_bytes = helpers.peek_bytes(stream, len(null_repr))
                if potential_null_bytes == null_repr:
                    # If we get here then the bytes we read ahead match null_value. Move
                    # the stream pointer to the beginning of the next field.
                    stream.seek(len(null_repr), os.SEEK_CUR)
                    for validator in self.validators:
                        validator(None)
                    return None
                # else: the bytes we read didn't match null_value. Fall through and try to
                # load the value using the field's normal loading code.

        # TODO (dargueta): This try-catch just to set the field feels dumb.
        try:
            loaded_value = self._do_load(
                stream, context=context, loaded_fields=loaded_fields
            )
        except errors.DeserializationError as err:
            err.field = self
            raise

        # Here we handle the case where null_value is DEFAULT or an instance of `T`
        # rather than a byte string. Note that there's no way for us to determine upon
        # loading if something matches DEFAULT. This is why we're deprecating setting
        # `null_value` to DEFAULT.
        if self.allow_null and loaded_value == self.null_value:
            loaded_value = None

        # TODO (dargueta): Change this to a validator instead.
        if self.const is not UNDEFINED and loaded_value != self.const:
            raise errors.ValidationError(field=self, value=loaded_value)

        for validator in self.validators:
            validator(loaded_value)
        return loaded_value

    def from_bytes(
        self,
        data: bytes,
        context: Any = None,
        exact: bool = True,
        loaded_fields: Optional[StrDict] = None,
    ) -> Union[Optional[T], _NotPresent]:
        """Load from the given byte string.

        :param bytes data:
            A bytes-like object to get the data from.
        :param context:
            Additional data to pass to this method. Subclasses must ignore anything they
            don't recognize.
        :param bool exact:
            ``data`` must contain exactly the number of bytes required. If not all the
            bytes in ``data`` were used when reading the struct, throw an exception.
        :param dict loaded_fields:
            A dictionary of the fields that have already been loaded. This is set
            automatically when a field is loaded by a
            :class:`~binobj.structures.Struct`.

        :return: The deserialized data, or :data:`NOT_PRESENT` if the field is missing.
        """
        if loaded_fields is None:
            loaded_fields = {}

        stream = io.BytesIO(data)
        loaded_data = self.from_stream(stream, context, loaded_fields)

        if exact and (stream.tell() < len(data)):
            # BUGFIX: the %-arguments used to be swapped, reporting the numbers
            # backwards ("Expected to read <actual>, read <expected>").
            raise errors.ExtraneousDataError(
                "Expected to read %d bytes, read %d." % (len(data), stream.tell())
            )
        return loaded_data

    @abc.abstractmethod
    def _do_load(
        self, stream: BinaryIO, context: Any, loaded_fields: StrDict
    ) -> Optional[T]:
        """Load an object from the stream.

        :param BinaryIO stream:
        :param context:
            Additional data to pass to this method. Subclasses must ignore anything they
            don't recognize.
        :param dict loaded_fields:
            A dictionary of the fields that have already been loaded. This is guaranteed
            to not be ``None``.

        :return: The loaded object.
        """
        raise NotImplementedError

    def to_stream(
        self,
        stream: BinaryIO,
        data: Union[Optional[T], _Default] = DEFAULT,
        context: Any = None,
        all_fields: Optional[StrDict] = None,
    ) -> None:
        """Convert the given data into bytes and write it to ``stream``.

        :param BinaryIO stream:
            The stream to write the serialized data into.
        :param data:
            The data to dump. Can be omitted only if this is a constant field or if a
            default value is defined.
        :param context:
            Additional data to pass to this method. Subclasses must ignore anything they
            don't recognize.
        :param dict all_fields:
            A dictionary of the fields about to be dumped. This is automatically set by
            the field's containing :class:`~binobj.structures.Struct`.
        """
        if all_fields is None:
            all_fields = {}

        if data is DEFAULT:
            # Typecast is not entirely truthful; this may return UNDEFINED if the field
            # has no default value.
            data = typing.cast(Optional[T], self.default)
            if data is UNDEFINED or data is DEFAULT:
                raise errors.MissingRequiredValueError(field=self)

        # BUGFIX: validators used to run twice for non-null values (once here and
        # once again right before _do_dump). They now run exactly once.
        for validator in self.validators:
            validator(data)

        if data is None:
            stream.write(self._get_null_repr(all_fields))
            return

        self._do_dump(stream, data, context=context, all_fields=all_fields)

    def to_bytes(
        self,
        data: Union[Optional[T], _Default] = DEFAULT,
        context: Any = None,
        all_fields: Optional[StrDict] = None,
    ) -> bytes:
        """Convert the given data into bytes.

        :param data:
            The data to dump. Can be omitted only if this is a constant field or a
            default value is defined.
        :param context:
            Additional data to pass to this method. Subclasses must ignore anything they
            don't recognize.
        :param dict all_fields:
            A dictionary of the fields about to be dumped. This is automatically set by
            the field's containing :class:`~binobj.structures.Struct`.

        :return: The serialized data.
        :rtype: bytes
        """
        stream = io.BytesIO()
        self.to_stream(stream, data, context=context, all_fields=all_fields)
        return stream.getvalue()

    @abc.abstractmethod
    def _do_dump(
        self, stream: BinaryIO, data: T, context: Any, all_fields: StrDict
    ) -> None:
        """Write the given data to the byte stream.

        :param BinaryIO stream:
            The stream to write to.
        :param data:
            The data to dump. Guaranteed to not be ``None``.
        :param context:
            Additional data to pass to this method. Subclasses must ignore anything they
            don't recognize.
        :param dict all_fields:
            A dictionary of the fields about to be dumped. This is guaranteed to not be
            ``None``.
        """
        raise errors.UnserializableValueError(field=self, value=data)

    def _get_null_repr(self, all_fields: Optional[StrDict] = None) -> bytes:
        """Return the serialized value for ``None``.

        We need this function because there's some logic involved in determining if
        ``None`` is a legal value, and guessing the serialization if no default value is
        provided.

        :return: The serialized form of ``None`` for this field.
        :rtype: bytes
        """
        if all_fields is None:
            all_fields = {}

        if self.null_value is UNDEFINED:
            raise errors.UnserializableValueError(
                reason="`None` is not an acceptable value for %s." % self,
                field=self,
                value=None,
            )
        if self.null_value is not DEFAULT:
            if isinstance(self.null_value, bytes):
                return self.null_value
            return self.to_bytes(self.null_value, all_fields=all_fields)

        # User wants us to use all null bytes for the default null value.
        try:
            return b"\0" * self.get_expected_size(all_fields)
        except errors.UndefinedSizeError:
            raise errors.UnserializableValueError(
                reason="Can't guess appropriate serialization of `None` for %s "
                "because it has no fixed size." % self,
                field=self,
                value=None,
            )

    def _read_exact_size(
        self, stream: BinaryIO, loaded_fields: Optional[StrDict] = None
    ) -> bytes:
        """Read exactly the number of bytes this object takes up or crash.

        :param BinaryIO stream: The stream to read from.
        :param dict loaded_fields:
            A dict mapping names of fields to their loaded values. This allows us to
            read a variable-length field that depends on the value of another field
            occurring before it.

        :return: The correct number of bytes are read from the stream.
        :rtype: bytes

        :raise UnexpectedEOFError: Not enough bytes were left in the stream.

        .. versionchanged:: 0.6.1
            * Variable-length fields are now supported.
            * The ``loaded_fields`` argument.
        """
        if loaded_fields is None:
            loaded_fields = {}

        offset = stream.tell()
        n_bytes = self.get_expected_size(loaded_fields)
        data_read = stream.read(n_bytes)

        if len(data_read) < n_bytes:
            raise errors.UnexpectedEOFError(field=self, size=n_bytes, offset=offset)
        return data_read

    @overload
    def __get__(self, instance: None, owner: Type["Struct"]) -> "Field[T]":
        ...

    @overload
    def __get__(self, instance: "Struct", owner: Type["Struct"]) -> Optional[T]:
        ...

    # This annotation is bogus and only here to make MyPy happy. See bug report here:
    # https://github.com/python/mypy/issues/9416
    @overload
    def __get__(self, instance: "Field[Any]", owner: Type["Field[Any]"]) -> "Field[T]":
        ...

    def __get__(self, instance, owner):
        if instance is None:
            # Accessed on the class itself -- return the field descriptor.
            return self
        if self.name in instance.__values__:
            return instance.__values__[self.name]
        return self.compute_value_for_dump(instance)

    def __set__(self, instance: "Struct", value: Optional[T]) -> None:
        # Computed and const fields can't be assigned to.
        if self._compute_fn or self.const is not UNDEFINED:
            raise errors.ImmutableFieldError(field=self)

        for validator in self.validators:
            validator(value)
        instance.__values__[self.name] = value

    def __str__(self) -> str:
        return "%s(name=%r)" % (type(self).__name__, self.name)

    def __repr__(self) -> str:
        return "<%s.%s>" % (self.__module__, self)
|
"""
Created on Aug 26, 2011
@author: guillaume
"""
# Imports
from scipy import (zeros,
asarray,
pi, cos, sqrt)
from scipy.constants import hbar, mu_0
from chemex.constants import gamma
from chemex.bases.two_states.iph_aph import (R_IXY, R_2SZIXY, DR_XY,
R_IZ, R_2SZIZ, CS, DW,
J, DJ, ETAXY, ETAZ,
KAB, KBA, W1X, W1Y)
def compute_liouvillians(pb=0.0, kex=0.0, dw=0.0, r_nxy=5.0, dr_nxy=0.0,
                         r_nz=1.5, r_2hznz=5.0, etaxy=0.0, etaz=0.0,
                         j_hn=-93.0, dj_hn=0.0, cs_offset=0.0, w1=0.0):
    """
    Compute the exchange matrix (Liouvillian)

    The function assumes a 2-site (A <-> B) exchanging system.
    The matrix is written in 12x12 cartesian basis, that is:
    {Nx, Ny, Nz, 2HzNx, 2HzNy, 2HzNz}{a,b}.
    Here the thermal equilibrium is assumed to be 0. This is justified because of
    the +/- phase cycling of the first 90 degree pulse at the beginning of the
    cpmg block.

    Parameters
    ----------
    pb : float
        Fractional population of state B.
        0.0 for 0%, 1.0 for 100%.
    kex : float
        Exchange rate between state A and B in /s.
    dw : float
        Chemical shift difference between states A and B in rad/s.
    r_nz : float
        Longitudinal relaxation rate of state {a,b} in /s.
    r_nxy : float
        Transverse relaxation rate of state a in /s.
    dr_nxy : float
        Transverse relaxation rate difference between states a and b in /s.
    r_2hznz : float
        2-spin order longitudinal relaxation rate in /s.
    etaxy : float
        Transverse cross-correlated relaxation rate in /s.
    etaz : float
        Longitudinal cross-correlated relaxation rate in /s.
    j_hn : float
        Scalar coupling between N and HN in Hz.
    dj_hn : float
        Scalar coupling difference between states a and b in Hz.
    cs_offset : float
        Offset from the carrier in rad/s.
    w1 : float
        B1 (RF) field amplitude scaling the W1X/W1Y basis matrices --
        presumably in rad/s; confirm units against the callers.

    Returns
    -------
    out : tuple of three matrices
        ``(l_free, l_w1x, l_w1y)``: the free-precession Liouvillian of one
        isolated spin in the presence of two-site exchange, plus the x and y
        B1-field Liouvillians scaled by ``w1``.
    """
    # Forward/backward exchange rates from the total rate and population of B.
    kab = kex * pb
    kba = kex - kab
    # Antiphase transverse rate inferred from the in-phase rates.
    r_2hznxy = r_nxy + r_2hznz - r_nz
    # Accumulate the free-precession Liouvillian term by term.
    l_free = R_IXY * r_nxy
    l_free += R_2SZIXY * r_2hznxy
    l_free += DR_XY * dr_nxy
    l_free += R_IZ * r_nz
    l_free += R_2SZIZ * r_2hznz
    l_free += CS * cs_offset
    l_free += DW * dw
    l_free += J * pi * j_hn
    l_free += DJ * pi * dj_hn
    l_free += ETAXY * etaxy
    l_free += ETAZ * etaz
    l_free += KAB * kab
    l_free += KBA * kba
    # RF-field Liouvillians along x and y, scaled by the B1 amplitude.
    l_w1x, l_w1y = w1 * asarray([W1X, W1Y])
    return l_free, l_w1x, l_w1y
def compute_2hznz_eq(pb):
    """Return the 12x1 equilibrium magnetization vector for the 2HzNz terms.

    The state-A 2HzNz component (row 5) receives population ``1 - pb`` and the
    state-B component (row 11) receives ``pb``; all other rows start at zero.
    """
    mag_eq = zeros((12, 1))
    populations = {5: 1.0 - pb, 11: pb}
    for row, weight in populations.items():
        mag_eq[row, 0] += weight
    return mag_eq
def get_trz(mag):
    """Return the (state A, state B) differences ``2HzNz - Nz``.

    State A uses rows 5 and 2 of the 12x1 magnetization vector; state B uses
    rows 11 and 8.
    """
    deltas = [mag[upper, 0] - mag[lower, 0] for upper, lower in ((5, 2), (11, 8))]
    return deltas[0], deltas[1]
def compute_nh_etaz(r_nz, ppm_to_rads):
    # TODO: replace with appropriate code for HN(dip)/H(csa) calculation
    """
    THIS IS NOT PRESENTLY IMPLEMENTED.
    X-CORRELATION RATES ARE ZERO UNLESS EXPLICITLY SPECIFIED

    Approximate etaz, the NH-dipolar/N-CSA longitudinal cross-correlated
    relaxation rate, from the longitudinal relaxation rate.

    Arguments:
        r_nz -- longitudinal relaxation rate of the N15 nucleus in /s
        ppm_to_rads -- unit conversion factor (ppm -> rad/s) for the
            desired nucleus & field

    Returns:
        etaz (float) -- the estimated longitudinal cross-correlated
        relaxation rate in /s. (Note: unlike what an older version of this
        docstring stated, only etaz is returned, not an (etaxy, etaz) pair.)
    """
    # CSA and geometry constants for the amide N-H pair.
    delta_csa_nh = -166.0  # ppm
    r_nh = 1.04e-10  # meters
    sqrt3 = sqrt(3.0)
    # 19.6 degrees: angle between the CSA principal axis and the NH bond,
    # presumably -- confirm against the literature value used elsewhere.
    geo_factor = 0.5 * (3.0 * cos(19.6 / 180.0 * pi) ** 2 - 1.0)
    # CSA (cc) and dipolar (dd) interaction amplitudes.
    cc = delta_csa_nh * ppm_to_rads / sqrt3
    dd = -mu_0 * hbar * gamma['H'] * gamma['N'] / (4.0 * pi * r_nh ** 3)
    cc2 = cc ** 2
    dd2 = dd ** 2
    ccdd = cc * dd
    # Spectral density at the N frequency, estimated from r_nz.
    jwn = r_nz / (cc2 + 0.75 * dd2)
    etaz = sqrt3 * geo_factor * ccdd * jwn
    return etaz
|
"""
This module contains various classes for performing
tokenization, stemming, and filtering.
"""
from miso.data.tokenizers.tokenizer import Token, Tokenizer
from miso.data.tokenizers.word_tokenizer import WordTokenizer
|
import os
import time
from pathlib import Path
from typing import Optional
import fsspec
import posixpath
from aiohttp.client_exceptions import ServerDisconnectedError
from .. import config
from .download_manager import DownloadConfig, map_nested
from .file_utils import get_authentication_headers_for_url, is_local_path, is_relative_path, url_or_path_join
from .logging import get_logger
# Module-level logger for the streaming download utilities.
logger = get_logger(__name__)
# File extensions treated as directly-loadable data files -- presumably used
# for format sniffing elsewhere in the package; confirm at the call sites.
BASE_KNOWN_EXTENSIONS = ["txt", "csv", "json", "jsonl", "tsv", "conll"]
def xjoin(a, *p):
    """Join path components onto the first hop of a "::"-chained URL or path.

    fsspec uses the special separator "::" to chain URLs, which is handy when
    there are multiple hops -- for example, to reach a file inside a zip
    archive served over HTTP:

        zip://folder1/file.txt::https://host.com/archive.zip

    This function extends ``os.path.join`` to such chained URLs by applying the
    join to the first path of the chain only, leaving every other hop intact.
    It supports both local paths and URLs.

    Example::

        >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
        zip://folder1/file.txt::https://host.com/archive.zip
    """
    first_hop, *other_hops = a.split("::")
    if is_local_path(first_hop):
        first_hop = Path(first_hop, *p).as_posix()
    else:
        first_hop = posixpath.join(first_hop, *p)
    return "::".join([first_hop, *other_hops])
def _add_retries_to_file_obj_read_method(file_obj):
    """Patch ``file_obj.read`` in place so server disconnections are retried.

    Reads are attempted up to ``config.STREAMING_READ_MAX_RETRIES`` times,
    sleeping ``config.STREAMING_READ_RETRY_INTERVAL`` seconds between attempts.
    If every attempt fails with :class:`ServerDisconnectedError`, a
    :class:`ConnectionError` is raised.
    """
    read = file_obj.read
    max_retries = config.STREAMING_READ_MAX_RETRIES

    def read_with_retries(*args, **kwargs):
        for retry in range(1, max_retries + 1):
            try:
                return read(*args, **kwargs)
            except ServerDisconnectedError:
                # BUGFIX: "diconnected" typo corrected in the warning message.
                logger.warning(
                    f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
                )
                if retry < max_retries:
                    # Don't sleep after the final failed attempt.
                    time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
        raise ConnectionError("Server Disconnected")

    file_obj.read = read_with_retries
def xopen(file, mode="r", *args, **kwargs):
    """
    Extend the builtin ``open`` to support remote files through fsspec.

    Extra args/kwargs are forwarded to ``fsspec.open``; the ``use_auth_token``
    kwarg is consumed here to build authentication headers for https URLs
    (queries to private repos on huggingface.co). The returned file object
    retries reads on server disconnection.
    """
    protocol = fsspec.get_fs_token_paths(file)[0].protocol
    if protocol == "https":
        token = kwargs.pop("use_auth_token", None)
        kwargs["headers"] = get_authentication_headers_for_url(file, use_auth_token=token)
    file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
    _add_retries_to_file_obj_read_method(file_obj)
    return file_obj
class StreamingDownloadManager(object):
    """
    Download manager that uses the "::" separator to navigate through (possibly remote)
    compressed archives.

    Contrary to the regular DownloadManager, the `download` and `extract` methods don't
    actually download nor extract data; they return the path or url that can be opened
    with the `xopen` function, which extends the builtin `open` to stream remote files.
    """

    def __init__(
        self,
        dataset_name: Optional[str] = None,
        data_dir: Optional[str] = None,
        download_config: Optional[DownloadConfig] = None,
        base_path: Optional[str] = None,
    ):
        self._dataset_name = dataset_name
        self._data_dir = data_dir
        self._download_config = download_config or DownloadConfig()
        self._base_path = base_path or os.path.abspath(".")

    @property
    def manual_dir(self):
        # directory of manually downloaded data, if configured
        return self._data_dir

    def download(self, url_or_urls):
        """Resolve (possibly nested) urls/paths; relative paths are joined to the base path."""
        url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
        return url_or_urls

    def _download(self, url_or_filename):
        if is_relative_path(url_or_filename):
            # append the relative path to the base_path
            url_or_filename = url_or_path_join(self._base_path, url_or_filename)
        return url_or_filename

    def extract(self, path_or_paths):
        """Return (possibly nested) chained urls that stream from inside archives."""
        urlpaths = map_nested(self._extract, path_or_paths, map_tuple=True)
        return urlpaths

    def _extract(self, urlpath):
        protocol = self._get_extraction_protocol(urlpath)
        if protocol is None:
            # no extraction
            return urlpath
        elif protocol == "gzip":
            # There is one single file, which is the uncompressed gzip file.
            # BUGFIX: rstrip(".gz") strips any trailing '.', 'g' or 'z' characters
            # (e.g. "logging.gz" -> "loggin"); slice off the exact suffix instead.
            inner_file = os.path.basename(urlpath.split("::")[0])[: -len(".gz")]
            return f"{protocol}://{inner_file}::{urlpath}"
        else:
            return f"{protocol}://::{urlpath}"

    def _get_extraction_protocol(self, urlpath) -> Optional[str]:
        """Return the fsspec protocol needed to read inside `urlpath`, or None if no extraction applies."""
        path = urlpath.split("::")[0]
        if path.split(".")[-1] in BASE_KNOWN_EXTENSIONS:
            return None
        elif path.endswith(".gz") and not path.endswith(".tar.gz"):
            return "gzip"
        elif path.endswith(".zip"):
            return "zip"
        raise NotImplementedError(f"Extraction protocol for file at {urlpath} is not implemented yet")

    def download_and_extract(self, url_or_urls):
        return self.extract(self.download(url_or_urls))
|
'''initialize'''
from .userweibospider import UserWeiboSpider
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import sys
import time
from collections import OrderedDict
from ..actors import ActorNotExist
from ..compat import OrderedDict3
from ..errors import WorkerProcessStopped
from ..config import options
from ..promise import PromiseActor
from ..cluster_info import HasClusterInfoActor
logger = logging.getLogger(__name__)
class WorkerActor(HasClusterInfoActor, PromiseActor):
    """
    Common base for all worker-side actors: cluster-info registration,
    plasma chunk store access and promise rejection on process failures.
    """
    @classmethod
    def default_name(cls):
        # every worker actor is addressed as "w:<ClassName>"
        return 'w:' + cls.__name__

    def post_create(self):
        logger.debug('Actor %s running in process %d', self.uid, os.getpid())
        try:
            self.set_cluster_info_ref()
        except ActorNotExist:
            # no cluster info actor available in this deployment; skip
            pass
        self._init_chunk_store()

    def _init_chunk_store(self):
        # connect to the local plasma store and wrap it with the key mapper
        import pyarrow.plasma as plasma
        from .chunkstore import PlasmaChunkStore, PlasmaKeyMapActor

        key_map_ref = self.ctx.actor_ref(uid=PlasmaKeyMapActor.default_name())
        self._plasma_client = plasma.connect(options.worker.plasma_socket, '', 0)
        self._chunk_store = PlasmaChunkStore(self._plasma_client, key_map_ref)

    def get_meta_ref(self, session_id, chunk_key, local=True):
        from ..scheduler.chunkmeta import ChunkMetaActor, LocalChunkMetaActor

        scheduler_addr = self.get_scheduler((session_id, chunk_key))
        meta_cls = LocalChunkMetaActor if local else ChunkMetaActor
        return self.ctx.actor_ref(meta_cls.default_name(), address=scheduler_addr)

    def handle_actors_down(self, halt_refs):
        """
        Handle process down event

        :param halt_refs: actor refs in halt processes
        """
        # raise-and-catch so a real traceback is attached to exc_info
        try:
            raise WorkerProcessStopped
        except WorkerProcessStopped:
            exc_info = sys.exc_info()
            rejected = self.reject_promise_refs(halt_refs, *exc_info)
        logger.debug('Process halt detected. Affected promises %r rejected.',
                     [ref.uid for ref in rejected])

    def register_actors_down_handler(self):
        from .daemon import WorkerDaemonActor

        daemon_ref = self.ctx.actor_ref(WorkerDaemonActor.default_name())
        if self.ctx.has_actor(daemon_ref):
            daemon_ref.register_callback(self.ref(), self.handle_actors_down.__name__, _tell=True)
class ExpMeanHolder(object):
    """
    Collector of statistics of a given series, where older samples decay
    exponentially by ``factor`` every time a new value is put in, so recent
    samples dominate mean/var/std.
    """
    def __init__(self, factor=0.8):
        self._factor = factor
        self._count = 0        # number of samples ever recorded
        self._v_divided = 0    # decayed sum of values
        self._v_divisor = 0    # decayed sum of weights
        self._v2_divided = 0   # decayed sum of squared values

    def put(self, value):
        """Record a new sample, decaying all previous contributions."""
        self._count += 1
        self._v_divided = self._v_divided * self._factor + value
        self._v_divisor = self._v_divisor * self._factor + 1
        self._v2_divided = self._v2_divided * self._factor + value ** 2

    def count(self):
        """Total number of samples recorded."""
        return self._count

    def mean(self):
        """Decay-weighted mean of the series (0 when empty)."""
        if self._count == 0:
            return 0
        return self._v_divided * 1.0 / self._v_divisor

    def var(self):
        """Decay-weighted variance of the series (0 when empty)."""
        if self._count == 0:
            return 0
        return self._v2_divided * 1.0 / self._v_divisor - self.mean() ** 2

    def std(self):
        """Decay-weighted standard deviation of the series."""
        # var() can come out as a tiny negative number due to floating point
        # rounding, which would make math.sqrt raise ValueError; clamp at 0.
        return math.sqrt(max(self.var(), 0.0))
class ExpiringCache(dict):
    """
    Dict whose entries expire ``_expire_time`` seconds after their last write.

    Expired entries are purged lazily whenever a brand-new key is inserted.
    Insertion timestamps are kept oldest-first in an ordered mapping so only
    the stale prefix needs to be scanned.
    """
    def __init__(self, *args, **kwargs):
        expire_time = kwargs.pop('_expire_time', options.worker.callback_preserve_time)
        super(ExpiringCache, self).__init__(*args, **kwargs)
        self._expire_time = expire_time
        self._insert_times = OrderedDict3()

    def __setitem__(self, key, value):
        super(ExpiringCache, self).__setitem__(key, value)
        if key in self._insert_times:
            # refresh the timestamp and keep _insert_times sorted by it
            self._insert_times[key] = time.time()
            self._insert_times.move_to_end(key)
            return
        clean_keys = []
        self._insert_times[key] = time.time()
        last_finish_time = time.time() - self._expire_time
        # _insert_times is ordered oldest-first: stop at the first fresh entry
        for k, t in self._insert_times.items():
            if t < last_finish_time:
                clean_keys.append(k)
            else:
                break
        for k in clean_keys:
            # BUGFIX: also drop the timestamp, otherwise the stale key is
            # re-collected on the next insert and `del self[k]` raises
            # KeyError on the already-removed dict entry.
            del self._insert_times[k]
            self.pop(k, None)
def concat_operand_keys(graph, sep=','):
    """
    Join op keys and op type names of a graph's non-Fetch chunks.

    :param graph: graph whose chunks are inspected
    :param sep: separator used for both joined strings
    :return: tuple (joined op keys, joined op type names)
    """
    from ..operands import Fetch

    op_names_by_key = OrderedDict(
        (c.op.key, type(c.op).__name__) for c in graph if not isinstance(c.op, Fetch)
    )
    return sep.join(op_names_by_key.keys()), sep.join(op_names_by_key.values())
def get_chunk_key(key):
    """Return the plain chunk key, unwrapping (chunk_key, sub_key) tuples."""
    if isinstance(key, tuple):
        return key[0]
    return key
def build_load_key(graph_key, chunk_key):
    """Build the memory-load key for a chunk under a given graph; tuple chunk keys are joined with '@'."""
    if isinstance(chunk_key, tuple):
        chunk_key = '@'.join(chunk_key)
    return '{0}_load_memory_{1}'.format(graph_key, chunk_key)
|
import oneflow.experimental.nn as nn
from utils.layer_norm import LayerNorm
class SublayerConnection(nn.Module):
    """
    Residual connection around a pre-norm sublayer:
    output = x + dropout(sublayer(norm(x))).

    Note: for code simplicity the norm is applied first, not last.
    """
    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        normed = self.norm(x)
        return x + self.dropout(sublayer(normed))
class input_SublayerConnection(nn.Module):
    """
    Residual connection around a pre-norm sublayer that also returns
    attention weights: (out, attn) = sublayer(norm(x));
    result is (x + dropout(out), attn).
    """
    def __init__(self, size, dropout):
        super(input_SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        sublayer_out, attn = sublayer(self.norm(x))
        return x + self.dropout(sublayer_out), attn
|
#
# Copyright (C) 2014-2019 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
from muninn._compat import string_types as basestring
from muninn.schema import *
from muninn.visitor import TypeVisitor
class _ConfigParser(TypeVisitor):
    """
    Convert raw (string-valued) configuration values into typed Python
    values, dispatching on the schema type via TypeVisitor.
    """
    def visit(self, type, value):
        # start the recursive visit with an empty path for error reporting
        return super(_ConfigParser, self).visit(type, value, "")

    def visit_Integer(self, type, value, path):
        try:
            return int(value)
        except ValueError:
            raise ValueError(prefix_message_with_path(path, "invalid value %r for type %r" % (value, type.name())))

    def visit_Boolean(self, type, value, path):
        normalized = value.upper()
        if normalized in ("FALSE", "NO", "OFF", "0"):
            return False
        if normalized in ("TRUE", "YES", "ON", "1"):
            return True
        raise ValueError(prefix_message_with_path(path, "invalid value %r for type %r" % (value, type.name())))

    def visit_Text(self, type, value, path):
        # text values pass through untouched
        return value

    def visit_Mapping(self, type, value, path):
        path = path or "%s:" % type.name()
        try:
            names = iter(value)
        except TypeError:
            raise ValueError(prefix_message_with_path(path, "expected a mapping"))
        result = {}
        for item_name in names:
            if not isinstance(item_name, basestring):
                raise ValueError(prefix_message_with_path(path, "invalid item name: %r" % item_name))
            try:
                item_value = value[item_name]
            except TypeError:
                raise ValueError(prefix_message_with_path(path, "expected a mapping"))
            try:
                item_type = type[item_name]
            except KeyError:
                raise ValueError(prefix_message_with_path(join(path, item_name), "unrecognized configuration option"))
            result[item_name] = super(_ConfigParser, self).visit(item_type, item_value, join(path, item_name))
        return result

    def visit_Sequence(self, type, value, path):
        path = path or "%s:" % type.name()
        if not isinstance(value, basestring):
            raise ValueError(prefix_message_with_path(path, "invalid value %r for type %r" % (value, type.name())))
        items = []
        # sequences are encoded as whitespace-separated items in one string
        for index, item_value in enumerate(value.split()):
            items.append(super(_ConfigParser, self).visit(type.sub_type, item_value, path + "[%d]" % index))
        return items

    def default(self, type, value, path):
        raise InternalError("unsupported type: %s" % type.__name__)
def parse(value, type):
    """Parse a raw configuration value into the given schema type."""
    parser = _ConfigParser()
    return parser.visit(type, value)
|
from PyQt5.QtWidgets import QProgressBar
class Progress(QProgressBar):
    """QProgressBar subclass with default behavior (no customization yet)."""
    def __init__(self, parent=None):
        super().__init__(parent)
|
# -*- coding: UTF-8 -*-
from django.conf.urls import url, include
from cmdb import views, views_ajax
# URL routes for the cmdb app: page views live in `views`,
# JSON/AJAX endpoints in `views_ajax`.
urlpatterns = [
    # host management
    url(r'^$', views.getHostList, name='getHostList'),
    url(r'^getHostList/$', views.getHostList, name='getHostList'),
    url(r'^addHostForm/$', views.addHostForm, name='addHostForm'),
    url(r'^addChangeHostInfo/$', views_ajax.addChangeHostInfo, name='addChangeHostInfo'),
    url(r'^getHostDetailInfo/$', views_ajax.getHostDetailInfo, name='getHostDetailInfo'),
    url(r'^delHost/$', views_ajax.delHost, name='delHost'),
    # host user (credential) management
    url(r'^addHostUserForm/$', views.addHostUserForm, name='addHostUserForm'),
    url(r'^addChangeHostUserInfo/$', views_ajax.addChangeHostUserInfo, name='addChangeHostUserInfo'),
    url(r'^getHostUserDetailInfo/$', views_ajax.getHostUserDetailInfo, name='getHostUserDetailInfo'),
    url(r'^getHostDetail/(?P<hostId>[0-9]+)/$', views.getHostDetail, name='getHostDetail'),
    url(r'^delHostUser/$', views_ajax.delHostUser, name='delHostUser'),
    # legacy DB cluster routes (kept for reference)
    # url(r'^getDbInstanceList/$', views.getDbInstanceList, name='getDbInstanceList'),
    # url(r'^getDbClusterList/$', views.getDbClusterList, name='getDbClusterList'),
    # url(r'^addDbClusterForm/$', views.addDbClusterForm, name='addDbClusterForm'),
    # url(r'^addChangeDbClusterInfo/$', views_ajax.addChangeDbClusterInfo, name='addChangeDbClusterInfo'),
    # url(r'^delDbCluster/$', views_ajax.delDbCluster, name='delDbCluster'),
    # url(r'^getDbClusterDetail/(?P<clusterId>[0-9]+)/$', views.getDbClusterDetail, name='getDbClusterDetail'),
    # DB group and instance management
    url(r'^getDbGroupList/$', views.getDbGroupList, name='getDbGroupList'),
    url(r'^getDbGroupDetailInfo/$', views_ajax.getDbGroupDetailInfo, name='getDbGroupDetailInfo'),
    url(r'^addDbGroupForm/$', views.addDbGroupForm, name='addDbGroupForm'),
    url(r'^addChangeDbGroupInfo/$', views_ajax.addChangeDbGroupInfo, name='addChangeDbGroupInfo'),
    url(r'^getInstanceDetail/(?P<groupId>[0-9]+)/$', views.getInstanceDetail, name='getInstanceDetail'),
    url(r'^addDbInstanceForm/(?P<groupId>[0-9]+)/$', views.addDbInstanceForm, name='addDbInstanceForm'),
    url(r'^addChangeDbInstanceInfo/$', views_ajax.addChangeDbInstanceInfo, name='addChangeDbInstanceInfo'),
    url(r'^getDbInstanceDetailInfo/$', views_ajax.getDbInstanceDetailInfo, name='getDbInstanceDetailInfo'),
    url(r'^changDbInstanceForm/$', views.changDbInstanceForm, name='changDbInstanceForm'),
    url(r'^delDbInstance/$', views_ajax.delDbInstance, name='delDbInstance'),
]
|
# Package version string.
__version__ = '0.3.3'
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# from django.contrib.auth.models import User
# @admin.register(UserAdmin)
# class UserAdmin(UserAdmin):
# list_display = ('active', 'username', 'email', 'first_name', 'last_name')
# list_filter = ('active', 'username', 'email')
# search_fields = ('username', 'email', 'first_name', 'last_name')
# actions = ['activate_user']
# def activate_image(self, request: HttpRequest queryset):
# queryset.update(active=True)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 25 17:16:36 2019
@author: epyir
"""
import glob
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.autograd import Variable
import torchvision.transforms as transforms
import os.path as osp
from PIL import Image
import json
import random
class BatchLoader(Dataset):
    """
    Dataset over pre-extracted feature maps stored as .npy files.

    Reads a COCO-style annotation json (``gtRoot``) and keeps only samples
    whose action ``category_id`` is 0 or 1; the feature file of image id
    ``k`` is expected at ``<imageRoot>/<k-1>.npy``. Items are served in a
    fixed random permutation built at construction time.

    NOTE(review): annotations and images are assumed to be aligned
    index-by-index in the json — confirm against the data producer.
    """
    def __init__(self, imageRoot, gtRoot, batchSize=1, cropSize=(1280, 720)):
        super(BatchLoader, self).__init__()
        self.imageRoot = imageRoot
        self.gtRoot = gtRoot
        self.cropSize = cropSize    # kept for interface compatibility; unused here
        self.batchsize = batchSize  # kept for interface compatibility; unused here

        with open(gtRoot) as json_file:
            data = json.load(json_file)

        action_annotations = data['annotations']
        self.imgNames, self.targets = [], []
        for idx, image_info in enumerate(data['images']):
            category = int(action_annotations[idx]['category_id'])
            if category in (0, 1):
                npy_name = str(int(image_info['id']) - 1) + '.npy'
                self.imgNames.append(osp.join(self.imageRoot, npy_name))
                self.targets.append(category)

        self.count = len(self.imgNames)
        self.perm = list(range(self.count))
        random.shuffle(self.perm)

    def __len__(self):
        return self.count

    def __getitem__(self, ind):
        # index through the fixed permutation
        shuffled = self.perm[ind]
        target = np.array(self.targets[shuffled], dtype=np.int64)
        img = torch.Tensor(np.load(self.imgNames[shuffled]))
        return {'img': img, 'target': target}
|
from django.urls import path
from sch import views
# URL routes for the sch app (handlers live in sch.views).
urlpatterns = [
    path('ls1/',views.list,name='ls1'),
    path('footer1',views.footer1,name='fo1'),
    path('subs1',views.subs1,name='sub1')
]
|
'''
P22 must be connected to the onewire sensor via 4K7 pull-up resistor
'''
import time
import machine
from machine import Pin
class OneWire:
    """
    Bit-banged 1-Wire bus master over a single open-drain GPIO (MicroPython).

    The data line needs an external pull-up resistor (4.7k per the module
    docstring). Interrupts are disabled around time-critical read/write
    slots. NOTE: ``const()`` is a MicroPython compile-time construct, which
    is why the CMD_* codes are referenced below without a class prefix.
    """
    # ROM-level 1-Wire command codes
    CMD_SEARCHROM = const(0xf0)
    CMD_READROM = const(0x33)
    CMD_MATCHROM = const(0x55)
    CMD_SKIPROM = const(0xcc)

    def __init__(self, pin):
        # pin: machine.Pin driving the bus; reconfigured as open-drain + pull-up
        self.pin = pin
        self.pin.init(pin.OPEN_DRAIN, pin.PULL_UP)

    def reset(self):
        """
        Perform the onewire reset function.
        Returns True if a device asserted a presence pulse, False otherwise.
        """
        # cache lookups so the timed section runs with minimal overhead
        sleep_us = time.sleep_us
        disable_irq = machine.disable_irq
        enable_irq = machine.enable_irq
        pin = self.pin
        pin(0)
        sleep_us(480)       # hold the line low for the reset pulse
        i = disable_irq()
        pin(1)
        sleep_us(60)        # release, then sample the presence window
        status = not pin()  # a present device pulls the line low
        enable_irq(i)
        sleep_us(420)       # let the presence window finish
        return status

    def read_bit(self):
        """Read a single bit from the bus (time-critical slot)."""
        sleep_us = time.sleep_us
        enable_irq = machine.enable_irq
        pin = self.pin
        pin(1) # half of the devices don't match CRC without this line
        i = machine.disable_irq()
        pin(0)
        sleep_us(1)
        pin(1)
        sleep_us(1)
        value = pin()   # sample shortly after releasing the line
        enable_irq(i)
        sleep_us(40)    # complete the read slot
        return value

    def read_byte(self):
        """Read one byte, least-significant bit first."""
        value = 0
        for i in range(8):
            value |= self.read_bit() << i
        return value

    def read_bytes(self, count):
        """Read `count` bytes and return them as a bytearray."""
        buf = bytearray(count)
        for i in range(count):
            buf[i] = self.read_byte()
        return buf

    def write_bit(self, value):
        """Write a single bit to the bus (time-critical slot)."""
        sleep_us = time.sleep_us
        pin = self.pin
        i = machine.disable_irq()
        pin(0)
        sleep_us(1)
        pin(value)      # line level during the slot encodes the bit
        sleep_us(60)
        pin(1)
        sleep_us(1)
        machine.enable_irq(i)

    def write_byte(self, value):
        """Write one byte, least-significant bit first."""
        for i in range(8):
            self.write_bit(value & 1)
            value >>= 1

    def write_bytes(self, buf):
        """Write every byte of `buf` to the bus."""
        for b in buf:
            self.write_byte(b)

    def select_rom(self, rom):
        """
        Select a specific device to talk to. Pass in rom as a bytearray (8 bytes).
        """
        self.reset()
        self.write_byte(CMD_MATCHROM)
        self.write_bytes(rom)

    def crc8(self, data):
        """
        Compute CRC

        Bitwise CRC-8 as used by 1-Wire devices; a valid ROM/scratchpad
        yields a CRC equal to its trailing CRC byte.
        """
        crc = 0
        for i in range(len(data)):
            byte = data[i]
            for b in range(8):
                fb_bit = (crc ^ byte) & 0x01
                if fb_bit == 0x01:
                    crc = crc ^ 0x18
                crc = (crc >> 1) & 0x7f
                if fb_bit == 0x01:
                    crc = crc | 0x80
                byte = byte >> 1
        return crc

    def scan(self):
        """
        Return a list of ROMs for all attached devices.
        Each ROM is returned as a bytes object of 8 bytes.
        """
        devices = []
        diff = 65
        rom = False
        for i in range(0xff):
            rom, diff = self._search_rom(rom, diff)
            if rom:
                devices += [rom]
            if diff == 0:   # no unresolved bit discrepancies remain
                break
        return devices

    def _search_rom(self, l_rom, diff):
        """
        One step of the 1-Wire SEARCH ROM algorithm: starting from the
        previously found ROM `l_rom` and discrepancy position `diff`,
        return (next ROM, next discrepancy position), or (None, 0) on
        bus error / no devices.
        """
        if not self.reset():
            return None, 0
        self.write_byte(CMD_SEARCHROM)
        if not l_rom:
            l_rom = bytearray(8)
        rom = bytearray(8)
        next_diff = 0
        i = 64
        for byte in range(8):
            r_b = 0
            for bit in range(8):
                # each bit slot: read the bit and its complement
                b = self.read_bit()
                if self.read_bit():
                    if b: # there are no devices or there is an error on the bus
                        return None, 0
                else:
                    if not b: # collision, two devices with different bit meaning
                        if diff > i or ((l_rom[byte] & (1 << bit)) and diff != i):
                            b = 1
                            next_diff = i
                self.write_bit(b)
                if b:
                    r_b |= 1 << bit
                i -= 1
            rom[byte] = r_b
        return rom, next_diff
class DS18X20(object):
    """
    Driver for DS18S20 (family code 0x10) and DS18B20 (family code 0x28)
    temperature sensors on a OneWire bus.
    """
    def __init__(self, onewire):
        self.ow = onewire
        # keep only ROMs of supported temperature-sensor families
        self.roms = [rom for rom in self.ow.scan() if rom[0] in (0x10, 0x28)]
        # probe for floating point support (some builds lack it)
        self.fp = True
        try:
            1 / 1
        except TypeError:
            self.fp = False  # floatingpoint not supported

    def isbusy(self):
        """
        Checks wether one of the DS18x20 devices on the bus is busy
        performing a temperature convertion
        """
        return not self.ow.read_bit()

    def start_convertion(self, rom=None):
        """
        Start the temp conversion on one DS18x20 device.
        Pass the 8-byte bytes object with the ROM of the specific device you want to read.
        If only one DS18x20 device is attached to the bus you may omit the rom parameter.
        """
        if rom is None and len(self.roms) > 0:
            rom = self.roms[0]
        if rom is not None:
            rom = rom or self.roms[0]
            bus = self.ow
            bus.reset()
            bus.select_rom(rom)
            bus.write_byte(0x44)  # Convert Temp

    def read_temp_async(self, rom=None):
        """
        Read the temperature of one DS18x20 device if the convertion is complete,
        otherwise return None.
        """
        if self.isbusy():
            return None
        if rom is None and len(self.roms) > 0:
            rom = self.roms[0]
        if rom is None:
            return None
        bus = self.ow
        bus.reset()
        bus.select_rom(rom)
        bus.write_byte(0xbe)  # Read scratch
        raw = bus.read_bytes(9)
        return self.convert_temp(rom[0], raw)

    def convert_temp(self, rom0, data):
        """
        Convert a raw 9-byte scratchpad into degrees celsius.

        `rom0` selects the conversion formula (0x10 or 0x28 family). Without
        floating point support the result is a fixed point value scaled by 100.
        """
        lsb = data[0]
        msb = data[1]
        if rom0 == 0x10:
            if msb != 0:
                # negative reading: drop bit 0, set the high bit, then
                # convert from two's complement
                reading = lsb >> 1 | 0x80
                reading = -((~reading + 1) & 0xff)
            else:
                reading = lsb >> 1  # truncate bit 0 by shifting
            count_remain = data[6]
            count_per_c = data[7]
            if self.fp:
                return reading - 25 + (count_per_c - count_remain) / count_per_c
            return 100 * reading - 25 + (count_per_c - count_remain) // count_per_c
        elif rom0 == 0x28:
            temp = (msb << 8 | lsb) / 16 if self.fp else (msb << 8 | lsb) * 100 // 16
            if (msb & 0xf8) == 0xf8:  # for negative temperature
                temp -= 0x1000
            return temp
        else:
            assert False
# --- hardware smoke test: requires DS18x20 sensor(s) wired to pin P22 ---
# DS18B20 data line connected to pin P22
ow = OneWire(Pin('P22'))
ds18x20 = DS18X20(ow)
# True when at least one supported sensor was found on the bus
print(len(ds18x20.roms) > 0)
for i in range(5):
    ds18x20.start_convertion()
    time.sleep(1)  # give the sensor time to finish the conversion
    print(ds18x20.read_temp_async() is not None)
    time.sleep(1)
|
# Create your views here.
# Create your views here.
from django.views.generic.list import ListView
# from django_filters.rest_framework import DjangoFilterBackend, FilterSet, OrderingFilter
from django_gotolong.nach.models import Nach
class NachListView(ListView):
    """List view over all Nach records (pagination/filtering currently disabled)."""
    model = Nach
    # if pagination is desired
    # paginate_by = 300
    queryset = Nach.objects.all()

    def get_context_data(self, **kwargs):
        return super().get_context_data(**kwargs)
# from django.http import HttpResponse
# def index(request):
# return HttpResponse("Hello, world. You're at the polls index.")
#
|
# the main chanpy of the project
import getmac
import time
from datetime import datetime
from random import randint
import json
import requests as req
from webapp.user_login_app import mongo
from db.flaskdbreader import int2dt
import numpy as np
class Client:
    """
    Command-line client for the forum/monitoring web API.

    On construction the machine is identified by MAC address against the
    `users` Mongo collection; unknown machines are walked through
    registration, known ones get their stored API token loaded. All HTTP
    calls authenticate through the 'x-access-token' header.
    """
    def __init__(self):
        self._t = None       # API token
        self._u_id = None    # Mongo user id
        self._u_name = None  # display name
        # checking if user has account
        self._mac = getmac.getmac.get_mac_address()
        _u_mac = mongo.db.users.find_one({'mac': self._mac})
        if _u_mac is None:
            print('Register first:')
            self.register()
        else:
            self._u_id = mongo.db.users.find_one({'mac': self._mac})['_id']
            self._t = mongo.db.api_tokens.find_one({'user_id': self._u_id})['token']
            self._u_name = mongo.db.users.find_one({'_id': self._u_id})['name']

    def register(self):
        """Interactive sign-up: prompts for the API token issued by the web app."""
        print('Sign up here: http://127.0.0.1:5001/registration')
        api_token = input('Token: ')
        try:
            self._t = mongo.db.api_tokens.find_one({'token': api_token})['token']
            self._u_id = mongo.db.api_tokens.find_one({'token': api_token})['user_id']
            self._u_name = mongo.db.users.find_one({'_id': self._u_id})['name']
            req.put(url=f'http://127.0.0.1:5001/user/{self._u_id}')  # setting user as admin
            print('Logged in as {0}'.format(self._u_name))
        # give the user the possibility to copy and paste the token again
        except TypeError:
            api_token = input('Invalid or missing token, make sure you copied it right: ')
            t = mongo.db.api_tokens.find_one({'token': api_token})
            u = mongo.db.api_tokens.find_one({'token': api_token})
            while t is None or u is None:
                api_token = input('Invalid or missing token, make sure you copied it right (type exit to abort): ')
                if api_token.lower() == 'exit':
                    print('Please register correctly or you will not be able to use any command')
                    break
                t = mongo.db.api_tokens.find_one({'token': api_token})
                u = mongo.db.api_tokens.find_one({'token': api_token})
            else:
                # while/else: reached only when the loop found a valid token (no break)
                self._t = t['token']
                self._u_id = t['user_id']
                self._u_name = mongo.db.users.find_one({'_id': self._u_id})['name']
                req.put(url=f'http://127.0.0.1:5001/user/{self._u_id}')  # setting user as admin
                print('Logged in as {0}'.format(self._u_name))

    def users(self):
        """Return the names of all signed-up users as an object array."""
        r = req.get(url='http://127.0.0.1:5001/user', headers={'x-access-token': self._t})
        data = r.json()['Users']
        res = []
        for user in data:
            res.append(user["name"])
        return np.array(res, dtype=object)

    def n_images(self) -> int:
        """Return the number of images in active threads."""
        r = req.get(url='http://127.0.0.1:5000/active-images', headers={'x-access-token': self._t})
        data = r.json()['active-images']
        return data

    def n_replies(self) -> int:
        """Return the number of replies in active threads."""
        r = req.get(url='http://127.0.0.1:5000/active-replies', headers={'x-access-token': self._t})
        data = r.json()['active-replies']
        return data

    def posts_ts(self, start_ddmmyyyy: str):
        """Return all posts since `start_ddmmyyyy` (format dd/mm/yyyy)."""
        start_ts = int(datetime.strptime(start_ddmmyyyy, "%d/%m/%Y").timestamp() * 1000)
        r = req.get(url='http://127.0.0.1:5000/posts/from', params={'start': start_ts},
                    headers={'x-access-token': self._t})
        data = r.json()['posts']
        return np.array(data, dtype=object)

    def latest_np(self, n: str) -> [[int, str, str, str]]:
        """Return the last `n` posts."""
        n = str(n)
        r = req.get(url='http://127.0.0.1:5000/posts/last', params={'n': n}, headers={'x-access-token': self._t})
        data = r.json()['posts']
        return np.array(data, dtype=object)

    def c_stats(self, start_ddmmyyyy: str, end_ddmmyyyy: str):
        """Return statistics on users' countries in the given timespan (dd/mm/yyyy bounds)."""
        start_ts = int(datetime.strptime(start_ddmmyyyy, "%d/%m/%Y").timestamp() * 1000)
        end_ts = int(datetime.strptime(end_ddmmyyyy, "%d/%m/%Y").timestamp() * 1000)
        r = req.get(url='http://127.0.0.1:5000/stats/country/from', params={'start': start_ts, 'end': end_ts},
                    headers={'x-access-token': self._t})
        return r.json()

    def b_stats(self, start_ddmmyyyy: str, end_ddmmyyyy: str):
        """Return board statistics in the given timespan (dd/mm/yyyy bounds)."""
        start_ts = int(datetime.strptime(start_ddmmyyyy, "%d/%m/%Y").timestamp() * 1000)
        end_ts = int(datetime.strptime(end_ddmmyyyy, "%d/%m/%Y").timestamp() * 1000)
        r = req.get(url='http://127.0.0.1:5000/stats/fetch', params={'start': start_ts, 'end': end_ts},
                    headers={'x-access-token': self._t})
        data = r.json()['bstats']
        return np.array(data, dtype=object)

    def thread(self, thread_number=0):
        """Search for the thread with the given number; returns an array or 'Thread not found'."""
        thrd_no = thread_number
        r = req.get(url='http://127.0.0.1:5000/threads', params={'thrd_no': thrd_no},
                    headers={'x-access-token': self._t})
        thrd = r.json()['threads']
        if type(thrd) == list:
            res = [(thrd[idx][0], thrd[idx][1], thrd[idx][2], int2dt(thrd[idx][3]).strftime('%d/%m/%Y %H:%M:%S'),
                    round(thrd[idx][4], 2), round(thrd[idx][5], 2)) for idx, k in enumerate(thrd)]
            return np.array(res)
        elif type(thrd) == dict:
            res = thrd['_id'], thrd['#images'], thrd['#replies'], int2dt(thrd['ts']).strftime('%d/%m/%Y %H:%M:%S'), \
                  round(thrd['subjectivity'], 2), round(thrd['polarity'], 2)
            return np.array(res)
        # BUGFIX: was `elif type(thrd) is None:` — type() never returns None,
        # so 'Thread not found' was unreachable.
        elif thrd is None:
            return 'Thread not found'

    def add_t(self, title: str, username: str, text: str):  # Creates a new thread
        thread = {
            '_id': randint(1000000, 9999999),
            'title': title,
            'users': username,
            'text': text,
            'ts': int(round(time.time() * 1000)),
            'posts': []
        }
        r = req.post(url='http://127.0.0.1:5000/threads',
                     data=json.dumps(thread, indent=4, sort_keys=True, default=str), headers={'x-access-token': self._t})
        thrd = r.json()
        res = thrd['_id'], thrd['title'], thrd['text'], thrd['users'], int2dt(thrd['ts']).strftime('%d/%m/%Y %H:%M:%S')
        return np.array(res)

    def del_t(self, thread_number: int):  # Deletes a thread
        thrd_no = thread_number
        r = req.delete(url='http://127.0.0.1:5000/threads', params={'thrd_no': str(thrd_no)},
                       headers={'x-access-token': self._t})
        return r.text

    def add_p(self, thread_number: int, title: str, username: str, text: str):  # Creates a new post on a selected thread
        thrd_no = thread_number
        post = {
            '_id': randint(1000000, 9999999),
            'thread_id': int(thrd_no),
            'title': title,
            'users': username,
            'text': text,
            'ts': int(round(time.time() * 1000))
        }
        r = req.post(url='http://127.0.0.1:5000/threads/posts',
                     data=json.dumps(post, indent=4, sort_keys=True, default=str), headers={'x-access-token': self._t})
        thrd = r.json()
        pst = [i for i in thrd['posts'] if i['_id'] == post['_id']]
        res = thrd['_id'], pst[0]['title'], pst[0]['text'], pst[0]['users'], int2dt(pst[0]['ts']).strftime(
            '%d/%m/%Y %H:%M:%S')
        return np.array(res)

    def posts(self, thread_number: int, n=-1, msg=False):
        """
        Return posts of a thread: all posts (n=-1), the last post (n=0/1)
        or the last `n` posts; `msg=True` includes title/text/user columns.
        """
        thrd_no = thread_number
        lastn = n
        if lastn == 0:
            lastn = 1
        r = req.get(url='http://127.0.0.1:5000/threads/posts', params={'thrd_no': thrd_no},
                    headers={'x-access-token': self._t})
        pst = r.json()['posts']
        if msg:
            res = [(pst[k][0], pst[k][1][0], pst[k][1][1], pst[k][1][2],
                    int2dt(pst[k][1][3]).strftime('%d/%m/%Y %H:%M:%S'),
                    pst[k][1][4], pst[k][1][5]) for k in range(len(pst))]
        else:
            res = [(pst[k][0], int2dt(pst[k][1][3]).strftime('%d/%m/%Y %H:%M:%S'),
                    pst[k][1][4], pst[k][1][5]) for k in range(len(pst))]
        if pst is not None:
            if lastn == 1:
                return res[-1]
            elif lastn == -1:
                return np.array(res[:-lastn:-1], dtype=object)
            else:
                return np.array(res[:-(lastn + 1):-1], dtype=object)  # returns every post or last n posts
        else:
            return 'Thread not found'

    def max_att(self):
        """Return (filename, size in MB, extension) of the biggest attachment."""
        r = req.get(url='http://127.0.0.1:5000/threads/attachment/max', headers={'x-access-token': self._t})
        data = r.json()['biggest-att']
        res = data['filename'], data['size']/1000000, data['ext']
        return np.array(res, dtype=object)

    def att(self):
        """Return all attachments metadata."""
        r = req.get(url='http://127.0.0.1:5000/threads/attachment', headers={'x-access-token': self._t})
        data = r.json()['att']
        return np.array(data)
|
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FPublisherTransport(object):
    """
    FPublisherTransport is used exclusively for pub/sub scopes. Publishers
    use it to publish messages to a topic.
    """
    def __init__(self, max_message_size):
        self._max_message_size = max_message_size

    def open(self):
        """Opens the transport for use."""
        raise NotImplementedError('You must override this')

    def close(self):
        """Closes the transport."""
        raise NotImplementedError('You must override this')

    def is_open(self):
        """Returns True if the transport is open, False otherwise."""
        raise NotImplementedError('You must override this')

    def get_publish_size_limit(self):
        """
        Returns the maximum allowable size of a payload to be published.
        A non-positive number indicates an unbounded allowable size.
        """
        return self._max_message_size

    def publish(self, topic, data):
        """
        Sends the given data over the transport to the given topic.
        Implementations of publish should be threadsafe.
        """
        raise NotImplementedError('You must override this')

    def _check_publish_size(self, data):
        """
        Returns True if the data EXCEEDS the configured maximum message
        size, False otherwise (always False when the limit is unbounded,
        i.e. non-positive).
        """
        return 0 < self._max_message_size < len(data)
class FSubscriberTransport(object):
    """
    FSubscriberTransport is used exclusively for pub/sub scopes.
    Subscribers use it to subscribe to a pub/sub topic.
    """
    def subscribe(self, topic, callback):
        """
        Subscribes to a pub/sub topic and invokes the callback with each
        received message.
        """
        raise NotImplementedError('You must override this')

    def unsubscribe(self):
        """Unsubscribes from the current topic."""
        raise NotImplementedError('You must override this')

    def remove(self):
        """
        Unsubscribes and removes durably stored information on the broker,
        if applicable. Defaults to a plain unsubscribe.
        """
        return self.unsubscribe()

    def is_subscribed(self):
        """
        Returns True if the transport is subscribed to a topic, False
        otherwise.
        """
        raise NotImplementedError('You must override this')
|
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# --- Auto-generated PySB export: model instantiation and species. ---
# Each Monomer declares a species name and the list of its binding-site
# names; species with no list (e.g. ParpC, C9) have no binding sites.
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two self-association sites (BaxA_1, BaxA_2), used by the
# pore-formation rules further down to chain BaxA molecules together.
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
# --- Parameters. ---
# Kinetic rate constants (suffixes: 2kf = bimolecular forward, 1kr =
# reverse, 1kc = catalytic/unimolecular) are all placeholders of 1.0 in
# this export. Parameters with the `_0` suffix are initial amounts consumed
# by the Initial declarations at the bottom of the model.
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
# Initial amounts (copy numbers) for each species.
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 32500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 110000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
# --- Observables. ---
# One observable per monomer; each uses the bare monomer pattern with no
# site conditions, so it tracks that species in every binding state.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
# --- Reaction rules. ---
# Reversible rules use `|` and take a (forward, reverse) rate pair;
# irreversible steps use `>>` and take a single catalytic rate. Integer
# site values denote bonds shared between the `%`-joined complex members.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
# Pore formation: BaxA dimer -> trimer -> tetramer via the BaxA_1/BaxA_2
# self-association sites.
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Transport: the BaxA tetramer pore binds membrane cargo (SmacM / CytoCM)
# and releases the cytosolic form (SmacC / CytoCC).
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# --- Initial conditions. ---
# Each Initial pairs a fully specified (all sites unbound) species pattern
# with its `_0` copy-number parameter declared above.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Jens Krüger <jens.krueger@frm2.tum.de>
#
# *****************************************************************************
from os import path
from nicos.guisupport.qt import QDialog, uic
class NewValueDialog(QDialog):
    """Dialog asking the user to enter a new value.

    The widget layout is loaded from ``newvaluedialog.ui``; the entered
    text can be retrieved via :meth:`getValue`.
    """

    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        # Resolve the .ui file relative to this module's location.
        ui_file = path.join(path.dirname(__file__),
                            '..', 'ui', 'dialogs', 'newvaluedialog.ui')
        uic.loadUi(path.abspath(ui_file), self)

    def getValue(self):
        """Return the text currently entered in the dialog's line edit."""
        return self.lineEditNewValue.text()
|
"""An API for dealing with orientation and rotations in 3-D space.
"""
import numpy as np
from walle.core import constants, quaternion, utils
from walle.core.matrix import RotationMatrix
from walle.core.orthogonal import is_proper_rotm
class Orientation(object):
    """A convenience class for manipulating 3-D orientations and rotations.

    The orientation is stored internally as a unit quaternion
    (``self._quat``); the other representations exposed by the properties
    below are derived from it.

    Attributes:
        rotm (ndarray): A 3x3 rotation matrix.
        axis_angle (tuple): axis angle.
        rot_vec (ndarray): rotation vector.
        quat (``UnitQuaternion``): unit quaternion.

    References:
        [1]: http://marc-b-reynolds.github.io/quaternions/2017/08/08/QuatRotMatrix.html
        [2]: https://www.lfd.uci.edu/~gohlke/code/transformations.py.html
        [3]: http://lolengine.net/blog/2014/02/24/quaternion-from-two-vectors-final
    """
    def __init__(self, *args):
        """Initializes the orientation in any of the following ways:

        u, theta: (array_like, float) Axis-angle. Represents a
            single rotation by a given angle `theta` in radians
            about a fixed axis represented by the unit vector `u`.
        rot_vec: (array_like) Rotation vector. Corresponds
            to the 3-element representation of the axis-angle,
            i.e. `rot_vec = theta * u`.
        rotm: (array_like) a `3x3` orthogonal rotation matrix.
        quat: (Quaternion or UnitQuaternion) a unit quaternion. The
            quaternion is normalized if it's a `Quaternion` object.

        If no arguments are passed, an identity Orientation is initialized.

        Raises:
            ValueError: if the arguments match none of the forms above.
        """
        if len(args) == 0:
            self._quat = quaternion.UnitQuaternion()  # identity rotation
        elif len(args) == 1:
            if utils.is_iterable(args[0]):
                if len(args[0]) == 3 and utils.is_1d_iterable(args[0]):  # rot_vec
                    self._rot_vec = np.asarray(args[0], dtype="float64")
                    self._rotvec2quat()
                elif len(args[0]) == 3 and np.asarray(args[0]).shape == (3, 3):  # rotm
                    rotm = np.asarray(args[0], dtype="float64")
                    self._rotm2quat(rotm)
                else:
                    raise ValueError("[!] Could not parse constructor arguments.")
            elif isinstance(args[0], quaternion.Quaternion):  # quat
                self._quat = quaternion.UnitQuaternion(args[0])
            else:
                raise ValueError("[!] Expecting array_like or Quaternion.")
        elif len(args) == 2:
            if utils.is_iterable(args[0]) and isinstance(args[1], (int, float)):  # axang
                if len(args[0]) != 3:
                    raise ValueError("[!] Axis must be length 3 array_like.")
                self._u = np.asarray(args[0], dtype="float64")
                self._theta = float(args[1])
                if np.isclose(self._theta, 0.):  # no unique axis, default to i
                    self._u = np.array([1, 0, 0], dtype="float64")
                if not np.isclose(np.dot(self._u, self._u), 1.):
                    self._u /= np.linalg.norm(self._u)  # normalize the axis
                self._axang2quat()
            else:
                raise ValueError("[!] Expecting (axis, angle) tuple.")
        else:
            raise ValueError("[!] Incorrect number of arguments.")

    def __repr__(self):
        s = "{}[θ: {:.5f}, u: ({:.5f}, {:.5f}, {:.5f})]"
        u, theta = self._quat.axis_angle
        return s.format(self.__class__.__name__, theta, *list(u))

    def __eq__(self, other):
        # Equality is ambiguous for orientations (same rotation vs. same
        # orientation), so force callers to pick one explicitly.
        raise NotImplementedError("[!] Use `eq_rot` or `eq_ori` for equality checks.")

    def __mul__(self, other):
        """Orientation multiplication.

        Supports:
            * rotating a single 3-vector or batch of vectors (returns ndarray),
            * composing with another `Orientation` or `UnitQuaternion`,
            * composing with an iterable of either (applied left to right).
        """
        if utils.is_iterable(other):
            if utils.is_batch_vectors(other) or utils.is_single_vector(other):
                return np.dot(self.rotm, np.asarray(other, dtype="float64"))
            else:
                if all(isinstance(x, Orientation) for x in other):  # batch of orientations
                    accessor = lambda x: x._quat
                elif all(isinstance(x, quaternion.UnitQuaternion) for x in other):  # batch of quaternions
                    accessor = lambda x: x
                else:
                    # BUGFIX: was `raise NotImplemented`, which itself raises
                    # "TypeError: exceptions must derive from BaseException".
                    # Returning the NotImplemented singleton lets Python
                    # signal the unsupported operand types correctly.
                    return NotImplemented
                quat_prod = self._quat
                for o in other:
                    quat_prod = quat_prod * accessor(o)
                quat_prod.fnormalize(True)  # renormalize to fight numerical drift
                return self.__class__(quat_prod)
        else:
            if isinstance(other, Orientation):
                other = other._quat
            elif isinstance(other, quaternion.UnitQuaternion):
                pass  # already a quaternion, use as-is
            else:
                return NotImplemented  # BUGFIX: was `raise NotImplemented`
            quat_prod = self._quat * other
            quat_prod.fnormalize(True)  # renormalize to fight numerical drift
            return self.__class__(quat_prod)

    def eq_rot(self, other):
        """This checks whether two orientations correspond to the same rotation.

        Rotations are equal only when the quaternions are (nearly) identical;
        q and -q count as different rotations here (compare `eq_ori`).
        """
        if isinstance(other, Orientation):
            if self._quat.dot(other._quat) > 1 - constants.EPS:
                return True
            return False
        else:
            raise NotImplementedError("[!] Other must be an instance of {}.".format(self.__class__.__name__))

    def eq_ori(self, other):
        """This checks whether two orientations correspond to the same orientation.

        Uses the absolute dot product so that q and -q (which rotate to the
        same orientation) compare equal.
        """
        if isinstance(other, Orientation):
            if abs(self._quat.dot(other._quat)) > 1 - constants.EPS:
                return True
            return False
        else:
            raise NotImplementedError("[!] Other must be an instance of {}.".format(self.__class__.__name__))

    def inv(self, inplace=False):
        """Returns the inverse of this orientation.

        Args:
            inplace: if True, invert this instance's quaternion in place and
                return None; otherwise return a new inverted Orientation.
        """
        if not inplace:
            return self.__class__(self._quat.inv())
        self._quat.inv(True)

    def _rotvec2quat(self):
        """Converts a rotation vector to a unit-quaternion.
        """
        # The rotation vector's magnitude is the angle, its direction the axis.
        self._theta = np.linalg.norm(self._rot_vec)
        if np.isclose(self._theta, 0.):
            # Zero rotation has no unique axis; default to i.
            self._u = np.array([1, 0, 0], dtype="float64")
            self._theta = 0.
        else:
            self._u = self._rot_vec / self._theta
        self._axang2quat()

    def _axang2quat(self):
        """Converts an axis-angle to a unit-quaternion.
        """
        # q = (cos(θ/2), u * sin(θ/2))
        s = np.cos(self._theta / 2)
        v = self._u * np.sin(self._theta / 2)
        self._quat = quaternion.UnitQuaternion(s, v)

    def _rotm2quat(self, rotm):
        """Converts a rotation matrix to a unit-quaternion.

        Uses Shepperd's algorithm [1] for proper rotation matrices and
        Bar-Itzhack's eigenvalue method [2] otherwise (which yields the
        quaternion of the nearest proper rotation).
        """
        m00 = rotm[0, 0]
        m01 = rotm[0, 1]
        m02 = rotm[0, 2]
        m10 = rotm[1, 0]
        m11 = rotm[1, 1]
        m12 = rotm[1, 2]
        m20 = rotm[2, 0]
        m21 = rotm[2, 1]
        m22 = rotm[2, 2]
        if is_proper_rotm(rotm):  # Shepperd's algorithm
            # Branch on the largest of the four quaternion components to
            # keep the divisor sqrt(s) well away from zero.
            if m22 >= 0:
                a = m00 + m11
                b = m10 - m01
                c = 1. + m22
                if a >= 0:
                    s = c + a
                    q = [s, m21 - m12, m02 - m20, b]
                else:
                    s = c - a
                    q = [b, m02 + m20, m21 + m12, s]
            else:
                a = m00 - m11
                b = m10 + m01
                c = 1. - m22
                if a >= 0:
                    s = c + a
                    q = [m21 - m12, s, b, m02 + m20]
                else:
                    s = c - a
                    q = [m02 - m20, b, s, m21 + m12]
            q = 0.5 * np.array(q) * (1. / np.sqrt(s))
        else:  # Bar-Itzhack's algorithm
            Q = np.array([
                [m00 - m11 - m22, 0.0, 0.0, 0.0],
                [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
            ])
            Q /= 3.
            eig_values, eig_vectors = np.linalg.eigh(Q)
            # BUGFIX: np.linalg.eigh returns eigenvectors as *columns*; the
            # previous code indexed a row. Select the column of the largest
            # eigenvalue, then reorder (x, y, z, w) -> (w, x, y, z).
            q = eig_vectors[:, np.argmax(eig_values)][[3, 0, 1, 2]]
            if q[0] < 0.:
                q = -q  # canonicalize to a non-negative scalar part
        self._quat = quaternion.UnitQuaternion(q)

    @classmethod
    def from_quats(cls, u, v):
        """Computes the rotation that rotates quaternion `u` to quaternion `v`.

        Args:
            u: (UnitQuaternion) the starting point quaternion.
            v: (UnitQuaternion) the end point quaternion.

        Raises:
            ValueError: if either input is not a UnitQuaternion.
        """
        if all(isinstance(x, quaternion.UnitQuaternion) for x in [u, v]):
            return cls(v * u.inv())
        else:
            raise ValueError("[!] Inputs must all be unit-quaternions.")

    @classmethod
    def from_vecs(cls, u, v):
        """Computes the rotation that rotates vector `u` to vector `v`.

        Implements the algorithm described in [3].

        Args:
            u: (array_like) the starting 3-D vector.
            v: (array_like) the final 3-D vector.
        """
        norm_uv = np.sqrt(np.dot(u, u) * np.dot(v, v))
        s = norm_uv + np.dot(u, v)
        if s < (1e-6 * norm_uv):  # if u and v point in opposite directions
            # rotate 180 degrees about any orthogonal axis
            s = 0.
            if abs(u[0]) > abs(u[2]):
                v = np.array([-u[1], u[0], 0.])
            else:
                v = np.array([0., -u[2], u[1]])
        else:
            v = np.cross(u, v)
        return cls(quaternion.UnitQuaternion(s, v))

    @classmethod
    def randquat(cls):
        """Generates an orientation by randomly sampling a quaternion.
        """
        return cls(quaternion.UnitQuaternion.random())

    @classmethod
    def randrotm(cls):
        """Generates an orientation by randomly sampling a rotation matrix.
        """
        return cls(RotationMatrix.random())

    @property
    def rotm(self):
        return self._quat.rotm

    @property
    def quat(self):
        return self._quat

    @property
    def axis_angle(self):
        return self._quat.axis_angle

    @property
    def rot_vec(self):
        return self._quat.rot_vec
|
# coding: utf-8
import flask
import flask_wtf
import wtforms
import auth
import config
import model
import util
from main import app
###############################################################################
# Admin Stuff
###############################################################################
@app.route('/admin/')
@auth.admin_required
def admin():
    """Render the admin dashboard.

    In development, also builds a convenience link pointing at the host on
    the next port up (e.g. the local admin server companion); if the host
    header cannot be parsed the link is simply omitted.
    """
    localhost = None
    if config.DEVELOPMENT and ':' in flask.request.host:
        try:
            parts = flask.request.host.split(':')
            port = int(parts[1]) + 1
            localhost = 'http://%s:%s/' % (parts[0], port)
        except (IndexError, ValueError):
            # Best effort only; a malformed host just means no link.
            # (Narrowed from a bare `except:` that also swallowed
            # SystemExit/KeyboardInterrupt.)
            pass
    return flask.render_template(
        'admin/admin.html',
        title='Admin',
        html_class='admin',
        localhost=localhost,
    )
###############################################################################
# Config Stuff
###############################################################################
class ConfigUpdateForm(flask_wtf.FlaskForm):
    """Admin form for editing the app's master Config record.

    Field labels are taken from the corresponding `model.Config` property
    verbose names; string inputs are whitespace-stripped via filters.
    NOTE(review): `wtforms.validators.required` is the legacy alias of
    `DataRequired` -- confirm it still exists in the pinned wtforms version.
    """
    analytics_id = wtforms.StringField(model.Config.analytics_id._verbose_name, filters=[util.strip_filter])
    announcement_html = wtforms.TextAreaField(model.Config.announcement_html._verbose_name, filters=[util.strip_filter])
    announcement_type = wtforms.SelectField(model.Config.announcement_type._verbose_name, choices=[(t, t.title()) for t in model.Config.announcement_type._choices])
    anonymous_recaptcha = wtforms.BooleanField(model.Config.anonymous_recaptcha._verbose_name)
    brand_name = wtforms.StringField(model.Config.brand_name._verbose_name, [wtforms.validators.required()], filters=[util.strip_filter])
    check_unique_email = wtforms.BooleanField(model.Config.check_unique_email._verbose_name)
    email_authentication = wtforms.BooleanField(model.Config.email_authentication._verbose_name)
    feedback_email = wtforms.StringField(model.Config.feedback_email._verbose_name, [wtforms.validators.optional(), wtforms.validators.email()], filters=[util.email_filter])
    flask_secret_key = wtforms.StringField(model.Config.flask_secret_key._verbose_name, [wtforms.validators.optional()], filters=[util.strip_filter])
    letsencrypt_challenge = wtforms.StringField(model.Config.letsencrypt_challenge._verbose_name, filters=[util.strip_filter])
    letsencrypt_response = wtforms.StringField(model.Config.letsencrypt_response._verbose_name, filters=[util.strip_filter])
    notify_on_new_user = wtforms.BooleanField(model.Config.notify_on_new_user._verbose_name)
    recaptcha_private_key = wtforms.StringField(model.Config.recaptcha_private_key._verbose_name, filters=[util.strip_filter])
    recaptcha_public_key = wtforms.StringField(model.Config.recaptcha_public_key._verbose_name, filters=[util.strip_filter])
    salt = wtforms.StringField(model.Config.salt._verbose_name, [wtforms.validators.optional()], filters=[util.strip_filter])
    trusted_hosts = wtforms.StringField(model.Config.trusted_hosts._verbose_name, [wtforms.validators.optional()], description='Comma separated: 127.0.0.1, example.com, etc')
    verify_email = wtforms.BooleanField(model.Config.verify_email._verbose_name)
@app.route('/admin/config/', methods=['GET', 'POST'])
@auth.admin_required
def admin_config():
  """Admin view: edit the app-wide configuration singleton (model.Config)."""
  config_db = model.Config.get_master_db()
  form = ConfigUpdateForm(obj=config_db)
  if form.validate_on_submit():
    if form.trusted_hosts.data:
      # The field is a comma separated string; store it as a collection of
      # host names with whitespace stripped (must happen BEFORE populate_obj).
      form.trusted_hosts.data = set(
        [e.strip() for e in form.trusted_hosts.data.split(',')])
    else:
      form.trusted_hosts.data = []
    form.populate_obj(config_db)
    # Generate the secrets on first save so they are never left empty.
    if not config_db.flask_secret_key:
      config_db.flask_secret_key = util.uuid()
    if not config_db.salt:
      config_db.salt = util.uuid()
    config_db.put()
    # NOTE(review): builtin reload() exists only on Python 2 -- confirm the
    # runtime, otherwise this needs importlib.reload(config).
    reload(config)
    app.config.update(CONFIG_DB=config_db)
    return flask.redirect(flask.url_for('admin'))
  # GET (or invalid POST): render with the hosts joined back into a string.
  form.trusted_hosts.data = ', '.join(config_db.trusted_hosts)
  return flask.render_template(
    'admin/admin_config.html',
    title='App Config',
    html_class='admin-config',
    form=form,
    api_url=flask.url_for('api.admin.config'),
  )
###############################################################################
# Auth Stuff
###############################################################################
class AuthUpdateForm(flask_wtf.FlaskForm):
  """Admin form for third-party authentication provider credentials.

  One key/secret StringField pair per supported OAuth provider; every field
  is optional and whitespace-stripped. Field names mirror model.Config
  properties so populate_obj() can copy them straight across.
  """
  azure_ad_client_id = wtforms.StringField(model.Config.azure_ad_client_id._verbose_name, filters=[util.strip_filter])
  azure_ad_client_secret = wtforms.StringField(model.Config.azure_ad_client_secret._verbose_name, filters=[util.strip_filter])
  bitbucket_key = wtforms.StringField(model.Config.bitbucket_key._verbose_name, filters=[util.strip_filter])
  bitbucket_secret = wtforms.StringField(model.Config.bitbucket_secret._verbose_name, filters=[util.strip_filter])
  dropbox_app_key = wtforms.StringField(model.Config.dropbox_app_key._verbose_name, filters=[util.strip_filter])
  dropbox_app_secret = wtforms.StringField(model.Config.dropbox_app_secret._verbose_name, filters=[util.strip_filter])
  facebook_app_id = wtforms.StringField(model.Config.facebook_app_id._verbose_name, filters=[util.strip_filter])
  facebook_app_secret = wtforms.StringField(model.Config.facebook_app_secret._verbose_name, filters=[util.strip_filter])
  github_client_id = wtforms.StringField(model.Config.github_client_id._verbose_name, filters=[util.strip_filter])
  github_client_secret = wtforms.StringField(model.Config.github_client_secret._verbose_name, filters=[util.strip_filter])
  google_client_id = wtforms.StringField(model.Config.google_client_id._verbose_name, filters=[util.strip_filter])
  google_client_secret = wtforms.StringField(model.Config.google_client_secret._verbose_name, filters=[util.strip_filter])
  instagram_client_id = wtforms.StringField(model.Config.instagram_client_id._verbose_name, filters=[util.strip_filter])
  instagram_client_secret = wtforms.StringField(model.Config.instagram_client_secret._verbose_name, filters=[util.strip_filter])
  linkedin_api_key = wtforms.StringField(model.Config.linkedin_api_key._verbose_name, filters=[util.strip_filter])
  linkedin_secret_key = wtforms.StringField(model.Config.linkedin_secret_key._verbose_name, filters=[util.strip_filter])
  mailru_app_id = wtforms.StringField(model.Config.mailru_app_id._verbose_name, filters=[util.strip_filter])
  mailru_app_secret = wtforms.StringField(model.Config.mailru_app_secret._verbose_name, filters=[util.strip_filter])
  microsoft_client_id = wtforms.StringField(model.Config.microsoft_client_id._verbose_name, filters=[util.strip_filter])
  microsoft_client_secret = wtforms.StringField(model.Config.microsoft_client_secret._verbose_name, filters=[util.strip_filter])
  reddit_client_id = wtforms.StringField(model.Config.reddit_client_id._verbose_name, filters=[util.strip_filter])
  reddit_client_secret = wtforms.StringField(model.Config.reddit_client_secret._verbose_name, filters=[util.strip_filter])
  twitter_consumer_key = wtforms.StringField(model.Config.twitter_consumer_key._verbose_name, filters=[util.strip_filter])
  twitter_consumer_secret = wtforms.StringField(model.Config.twitter_consumer_secret._verbose_name, filters=[util.strip_filter])
  vk_app_id = wtforms.StringField(model.Config.vk_app_id._verbose_name, filters=[util.strip_filter])
  vk_app_secret = wtforms.StringField(model.Config.vk_app_secret._verbose_name, filters=[util.strip_filter])
  yahoo_consumer_key = wtforms.StringField(model.Config.yahoo_consumer_key._verbose_name, filters=[util.strip_filter])
  yahoo_consumer_secret = wtforms.StringField(model.Config.yahoo_consumer_secret._verbose_name, filters=[util.strip_filter])
@app.route('/admin/auth/', methods=['GET', 'POST'])
@auth.admin_required
def admin_auth():
  """Admin view: edit third-party auth provider keys/secrets."""
  config_db = model.Config.get_master_db()
  form = AuthUpdateForm(obj=config_db)
  if form.validate_on_submit():
    form.populate_obj(config_db)
    config_db.put()
    # NOTE(review): builtin reload() exists only on Python 2 -- confirm the
    # runtime, otherwise this needs importlib.reload(config).
    reload(config)
    app.config.update(CONFIG_DB=config_db)
    return flask.redirect(flask.url_for('admin'))
  return flask.render_template(
    'admin/admin_auth.html',
    title='Auth Config',
    html_class='admin-auth',
    form=form,
    api_url=flask.url_for('api.admin.config'),
  )
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-05-27 08:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Schema cleanup for ApiaryReferral (auto-generated).

    Drops the workflow fields that duplicated data on Referral, keeps a
    nullable one-to-one link to Referral, and adds 'Apiary' to the
    ProposalType name choices. Historical migration: do not edit logic.
    """

    dependencies = [
        ('disturbance', '0060_auto_20200527_1335'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='apiaryreferral',
            options={},
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='linked',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='lodged_on',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='processing_status',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='proposal',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='referral_text',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='sent_by',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='sent_from',
        ),
        migrations.RemoveField(
            model_name='apiaryreferral',
            name='text',
        ),
        migrations.AlterField(
            model_name='apiaryreferral',
            name='referral',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='apiary_referral', to='disturbance.Referral'),
        ),
        migrations.AlterField(
            model_name='proposaltype',
            name='name',
            field=models.CharField(choices=[('Disturbance', 'Disturbance'), ('Powerline Maintenance', 'Powerline Maintenance'), ('Site Transfer', 'Site Transfer'), ('Temporary Use', 'Temporary Use'), ('Apiary', 'Apiary')], default='Disturbance', max_length=64, verbose_name='Application name (eg. Disturbance, Apiary)'),
        ),
    ]
|
## @file host.py
## @brief Utility functions for hosts
"""
Utility functions for hosts.
Detailed description (for [e]pydoc goes here)
"""
from pyVmomi import Vim
from pyVmomi.VmomiSupport import ResolveLinks
## @param si [in] Retrieve the root folder
def GetRootFolder(si):
   """
   Return the root Folder at the top of the inventory served by si.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a Folder
   @return   : Reference to the top of the inventory managed by this
               service.
   """
   return si.RetrieveContent().GetRootFolder()
## @param si [in] Retrieve the fault tolerance manager
def GetFaultToleranceMgr(si):
   """
   Return the FaultToleranceManager of this ServiceInstance.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a FaultToleranceManager
   @return   : The FaultToleranceManager for this ServiceInstance.
   """
   return si.RetrieveInternalContent().GetFtManager()
## @param si [in] Retrieve the host folder
def GetHostFolder(si):
   """
   Return the host folder of the first datacenter in the inventory.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a Folder
   @return   : A reference to the folder hierarchy that contains the
               compute resources, including hosts and clusters.
   """
   firstDatacenter = si.RetrieveContent().GetRootFolder().GetChildEntity()[0]
   return firstDatacenter.GetHostFolder()
## @param si [in] Retrieve the compute resource for the host
def GetComputeResource(si):
   """
   Return the compute resource for the host.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a ComputeResource
   @return   :
   """
   # First child of the first datacenter's host folder.
   return GetHostFolder(si).GetChildEntity()[0]
## @param si [in] Retrieve the host system
def GetHostSystem(si):
   """
   Return the first HostSystem of the compute resource.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a HostSystem
   @return   :
   """
   return GetComputeResource(si).GetHost()[0]
## @param si [in] Retrieve the host config manager
def GetHostConfigManager(si):
   """
   Return the host's HostConfigManager.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a HostConfigManager.
   @return   :
   """
   return GetHostSystem(si).GetConfigManager()
## @param si [in] Retrieve the host's virtual nic manager
def GetHostVirtualNicManager(si):
   """
   Return the host's VirtualNicManager.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a VirtualNicManager
   @return   :
   """
   return GetHostConfigManager(si).GetVirtualNicManager()
## @param si [in] Retrieve the host vmotion system
def GetHostVmotionSystem(si):
   """
   Return the host's VmotionSystem.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a VmotionSystem.
   @return   :
   """
   return GetHostConfigManager(si).GetVmotionSystem()
## @param si [in] Retrieve the UUID of the host
def GetHostUuid(si):
   """
   Retrieve the hardware BIOS UUID of the host.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : str
   @return   : Hardware BIOS identification.
   """
   hostSystem = GetHostSystem(si)
   hwInfo = hostSystem.GetHardware()
   # Fixed: identity comparison with None ('is None', PEP 8 E711) instead of
   # '== None', which can invoke arbitrary __eq__ on wrapper objects.
   if hwInfo is None:
      raise Exception("Hardware info of host is NULL.")
   return hwInfo.GetSystemInfo().GetUuid()
## @param si [in] Retrieve the root resource pool of a host
def GetRootResourcePool(si):
   """
   Return the root resource pool of the host's compute resource.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a ResourcePool
   @return   : Reference to root resource pool.
   """
   return GetComputeResource(si).GetResourcePool()
def GetNicIp(si, nicType):
   """
   Retrieve the IP associated with a specified Nic type on the host.

   @type  si      : ServiceInstance ManagedObject
   @param si      :
   @type  nicType : str
   @param nicType : Type of Nic
   @rtype         : str
   @return        : The IP address currently used by
                    the specified nic type, if selected
   """
   vnicMgr = GetHostVirtualNicManager(si)
   netConfig = vnicMgr.QueryNetConfig(nicType)
   # Fixed: identity comparison with None ('is None', PEP 8 E711).
   if netConfig is None:
      raise Exception("NetConfig is NULL.")
   vnicArr = ResolveLinks(netConfig.GetSelectedVnic(), netConfig)
   if len(vnicArr) < 1:
      raise Exception("No Nic configured for type " + nicType)
   ipConfig = vnicArr[0].GetSpec().GetIp()
   return ipConfig.GetIpAddress()
## @param si [in] Retrieve the VMotion IP of the host
def GetVMotionIP(si):
   """
   Retrieve the VMotion IP of the host.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : str
   @return   : The IP address currently used by the VMotion NIC,
               in IPv4 dot notation (e.g. "192.168.0.1").
   """
   nicType = Vim.Host.VirtualNicManager.NicType.vmotion
   return GetNicIp(si, nicType)
## @param si [in] Retrieve the FT Logging Nic IP of the host
def GetLoggingIP(si):
   """
   Retrieve the FT Logging Nic IP of the host.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : str
   @return   : The IP address currently used by the FT Logging NIC,
               in IPv4 dot notation (e.g. "192.168.0.1").
   """
   nicType = Vim.Host.VirtualNicManager.NicType.faultToleranceLogging
   return GetNicIp(si, nicType)
## @param si [in] Retrieve the VMotionManager instance for a host
def GetVmotionManager(si):
   """
   Retrieve the VMotionManager instance for a host.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : ManagedObjectReference to a VmotionManager.
   @return   :
   """
   # Constructed directly from the well-known MoRef id over si's stub.
   return Vim.Host.VMotionManager("ha-vmotionmgr", si._stub)
## @param si [in] Prints out host config information
def DumpHostConfig(si):
   """
   Print a summary of the host configuration to stdout.

   @type  si : ServiceInstance ManagedObject
   @param si :
   """
   print("Host Configuration:")
   host = GetHostSystem(si)
   print("Host Summary:\n", host.GetSummary())
   print("Host Runtime:\n", host.GetRuntime())
   configMgr = host.GetConfigManager()
   print(configMgr)
   # Fetched for the server round-trip even though it is not printed.
   datastoreSystem = configMgr.GetDatastoreSystem()
   print(host.GetDatastore())
   storageSystem = configMgr.GetStorageSystem()
   # Broken because python wrappers don't handle @links correctly.
   # Loops in the structures break the python wrappers
   # storageConfig = storageSystem.GetStorageInfo()
   # print storageConfig
   print(configMgr.GetNetworkSystem().GetNetworkInfo())
## @param si [in] Checks if host is a mockup
def IsHostMockup(si):
   """
   Checks if host is a mockup.

   @type  si : ServiceInstance ManagedObject
   @param si :
   @rtype    : boolean
   @return   : True if host is running in mockup mode, False otherwise
   """
   content = si.RetrieveContent()
   instanceUuid = content.GetAbout().GetInstanceUuid()
   # Fixed: 'is not None' (PEP 8 E711) and the idiomatic substring test
   # instead of find() != -1.  Behavior is unchanged.
   return instanceUuid is not None and "Mockup" in instanceUuid
|
import geopandas as gpd
import numpy as np
from shapely.geometry import LineString, Polygon, Point
import os
import pandas as pd
from pyproj import Transformer
# Forward/inverse coordinate transforms between EPSG:4326 (WGS84 lon/lat)
# and EPSG:2193 (NZ Transverse Mercator); always_xy keeps (lon, lat) order.
transformer = Transformer.from_crs(4326, 2193, always_xy=True)
trans_inv = Transformer.from_crs(2193, 4326, always_xy=True)
def fit_plane_to_points(points: np.ndarray, eps: float=1.0e-5):
    """
    Best-fit plane through an (N, 3) point cloud, constrained to pass
    through the centroid of the points. The SVD runs on the 3x3 scatter
    matrix, so cost is independent of the number of points.

    Returns:
        plane_normal: unit normal vector (A, B, C), oriented so C >= 0
        plane_origin: centroid of the points, used as the plane origin
    """
    # Centre the cloud on its centroid; the plane must pass through it.
    centroid = np.mean(points, axis=0)
    centered = points - centroid
    # 3x3 second-moment (scatter) matrix of the centred points.
    scatter = np.dot(centered.T, centered)
    # The normal is the singular vector of the smallest singular value.
    normal = np.linalg.svd(scatter)[0][:, -1]
    # Snap numerically insignificant components to zero, then renormalise.
    normal[np.where(np.abs(normal) < eps)] = 0.0
    normal /= np.linalg.norm(normal)
    # Resolve the SVD sign ambiguity: point the normal "up" (z >= 0).
    if (normal[-1] < 0.0):
        normal *= -1.0
    return normal, centroid
# Locations of points where slip rate changes
# (lon, lat) in WGS84; negative longitudes lie west of the antimeridian.
east_cape = Point(178.9916, -39.1775)
start_0_2 = Point(180.0, -37.00)
end_0_2 = Point(-177.3995, -32.5061)
start_0_5 = Point(-176.673, -31.016)
convergence_start = Point(179.098, -39.014)
convergence_end = Point(-174.162, -27.508)
def point_dist(point: Point):
    # Project a WGS84 point (reprojected to NZTM) onto the overall
    # along-strike direction, giving a scalar distance.
    # NOTE(review): `along_overall` is not defined in this file as shown --
    # confirm it is assigned (unit strike vector?) before this runs.
    return np.dot(along_overall, np.array(transformer.transform(point.x, point.y)))
def point_dist_nztm(point: Point):
    # Same projection as point_dist(), but for a point already in NZTM
    # coordinates (no reprojection).
    return float(np.dot(along_overall, np.array([point.x, point.y])))
# Along-strike distance of each control point; these module-level values
# parameterise coupling() and convergence() below.
east_cape_dist = point_dist(east_cape)
start_0_2_dist = point_dist(start_0_2)
end_0_2_dist = point_dist(end_0_2)
start_0_5_dist = point_dist(start_0_5)
convergence_start_dist = point_dist(convergence_start)
convergence_end_dist = point_dist(convergence_end)
def coupling(dist: float):
    """
    Interseismic coupling coefficient at along-strike distance *dist*:
    0.2 on the southern patch, 0.5 north of start_0_5, and a linear
    ramp between the two uniform patches.
    """
    assert dist >= east_cape_dist
    if dist > start_0_5_dist:
        return 0.5
    if dist <= end_0_2_dist:
        return 0.2
    # Linear ramp between the 0.2 and 0.5 patches.
    ramp_frac = (dist - end_0_2_dist) / (start_0_5_dist - end_0_2_dist)
    return 0.2 + (0.5 - 0.2) * ramp_frac
def convergence(dist: float):
    """
    Convergence rate in mm/yr, interpolated linearly along strike:
    49 mm/yr at latitude -39 through 85 mm/yr at -27.5.
    """
    rate_south = 49.
    rate_north = 85.
    frac = (dist - convergence_start_dist) / (convergence_end_dist - convergence_start_dist)
    return rate_south + (rate_north - rate_south) * frac
def convergence_dist(dist):
    # Unimplemented placeholder; currently returns None.
    pass
def kermadec_slip_rate(dist: float, modelled_value: float = 0.):
    """
    Slip rate (mm/yr) along the Kermadec segment.

    With a positive *modelled_value*, blend linearly from the modelled rate
    at East Cape to the coupling-derived rate (convergence * coupling) at
    the start of the 0.2-coupling patch; otherwise return the
    coupling-derived rate directly.
    """
    if modelled_value > 0.:
        frac = (dist - east_cape_dist) / (start_0_2_dist - east_cape_dist)
        # Fixed: removed stray debug `print(frac)` that polluted stdout.
        return modelled_value * (1 - frac) + convergence(dist) * coupling(dist) * frac
    else:
        return convergence(dist) * coupling(dist)
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
from flexget import plugin
from flexget.event import event
from flexget.plugins.filter.seen import FilterSeen
class FilterSeenInfoHash(FilterSeen):
    """Prevents the same torrent from being downloaded twice by remembering the infohash of all downloaded torrents."""

    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'string', 'enum': ['global', 'local']}
        ]
    }

    def __init__(self):
        # Only the infohash field identifies a torrent for seen-tracking.
        self.fields = ['torrent_info_hash']
        self.keyword = 'seen_info_hash'

    @plugin.priority(180)
    def on_task_filter(self, task, config):
        if config is False:
            return  # plugin disabled
        # Normalise infohashes to upper case so comparisons are
        # case-insensitive.
        for entry in task.entries:
            info_hash = entry.get('torrent_info_hash')
            if isinstance(info_hash, basestring):
                entry['torrent_info_hash'] = info_hash.upper()
        FilterSeen.on_task_filter(self, task, config, remember_rejected=True)

    def on_task_modify(self, task, config):
        if config is False:
            return  # plugin disabled
        # The torrent plugin fills in infohashes during the modify phase,
        # so run the filter once more now that they are available.
        self.on_task_filter(task, config)
        # Reject any duplicates among the entries accepted this run; the
        # first accepted entry with a given infohash wins.
        seen_hashes = set()
        for entry in task.accepted:
            if 'torrent_info_hash' not in entry:
                continue
            current = entry['torrent_info_hash']
            if current in seen_hashes:
                entry.reject('Already accepted torrent with this infohash once for this task')
            else:
                seen_hashes.add(current)
@event('plugin.register')
def register_plugin():
    # Register as a builtin so the filter runs on every task unless the
    # user explicitly disables it with `seen_info_hash: no`.
    plugin.register(FilterSeenInfoHash, 'seen_info_hash', builtin=True, api_ver=2)
|
import os
import time
from argparse import ArgumentParser
from os import makedirs
from os.path import basename, exists
from shutil import copyfile
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader
from config.config import TRAIN_RESOLUTION
from dataset.dataset_metadata import get_dataset_metadata
from dataset.dataset_seg import get_dataset
from dataset.dataset_transforms import get_transform_seg
from model.erfnet import ERFNet
from utils.class_weights import get_class_weights
from utils.iou_meter import IoUMeter
from utils.logger import AutomatedLogger
from utils.misc import copy_object_sourcefile
# All tensors/models are moved to GPU.  NOTE: torch.device alone does not
# verify CUDA availability -- a machine without CUDA fails at first .to().
device = torch.device('cuda')
def save_train_state(args, max_iou, enc: bool, epoch: int, is_best, model, optimizer, savedir, scheduler):
    """Persist training state to *savedir*.

    Writes a resumable checkpoint (model/optimizer/scheduler state plus
    bookkeeping) via save_checkpoint, then saves bare state_dict files
    every ``args.epochs_save`` epochs and whenever this epoch is the best
    so far.  Encoder-only training (``enc``) uses distinct file names.
    """
    if enc:
        path_checkpoint = savedir + '/checkpoint_enc.pth.tar'
        path_best = savedir + '/model_best_enc.pth.tar'
    else:
        path_checkpoint = savedir + '/checkpoint.pth.tar'
        path_best = savedir + '/model_best.pth.tar'
    save_checkpoint({
        'epoch': epoch + 1,  # epoch to resume from
        'arch': str(model),
        'state_dict': model.state_dict(),
        'best_acc': max_iou,
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }, is_best, path_checkpoint, path_best)
    if (enc):
        filename = f'{savedir}/model_encoder-{epoch:03}.pth'
        path_best = f'{savedir}/model_encoder_best.pth'
    else:
        filename = f'{savedir}/model-{epoch:03}.pth'
        path_best = f'{savedir}/model_best.pth'
    if args.epochs_save > 0 and epoch > 0 and epoch % args.epochs_save == 0:
        torch.save(model.state_dict(), filename)
        # Fixed: report the file actually written instead of '(unknown)'.
        print(f'save: {filename} (epoch: {epoch})')
    if (is_best):
        torch.save(model.state_dict(), path_best)
        print(f'save: {path_best} (epoch: {epoch})')
def load_train_resume(enc, model, optimizer, savedir, scheduler):
    """Restore model/optimizer/scheduler state from the latest checkpoint.

    Returns (max_iou, start_epoch) so the caller can continue training
    where the previous run left off.
    """
    suffix = '_enc' if enc else ''
    path_checkpoint = savedir + '/checkpoint' + suffix + '.pth.tar'
    assert os.path.exists(path_checkpoint)
    checkpoint = torch.load(path_checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    print(f'checkpoint for epoch {checkpoint["epoch"]} loaded')
    return checkpoint['best_acc'], checkpoint['epoch']
def get_DataLoaders(args, enc):
    """Build the train and validation DataLoaders for the configured dataset.

    Training data is augmented and shuffled; validation data is neither.
    """
    res = TRAIN_RESOLUTION[args.dataset + '_video']
    transform_train = get_transform_seg(args.dataset, enc, augment=True, height=res.height, width=res.width)
    transform_val = get_transform_seg(args.dataset, enc, augment=False, height=res.height, width=res.width)
    ds_train = get_dataset(args.dataset, args.datalist, transform_train, 'train', args.kth_frame)
    ds_val = get_dataset(args.dataset, args.datalist, transform_val, 'val')
    pin_mem = not args.no_pin_memory
    print(f'Pin memory: {pin_mem}')
    loader_train = DataLoader(ds_train, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True, pin_memory=pin_mem)
    loader_val = DataLoader(ds_val, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False, pin_memory=pin_mem)
    return loader_train, loader_val
def trainer(args, model, enc, epoch_callback_before=None):
    """Wire up loss, optimizer and LR schedule, then run the training loop."""
    meta = get_dataset_metadata(args.dataset)
    res = TRAIN_RESOLUTION[args.dataset + '_video']
    class_weights = get_class_weights(args.dataset, args.datalist, enc, res.height, res.width, args.num_workers, args.batch_size, args.kth_frame).to(device)
    print(f'Weights are: {class_weights}')
    criterion = nn.CrossEntropyLoss(ignore_index=meta.IGNORE_INDEX, reduction='mean', weight=class_weights)
    optimizer = Adam(model.parameters(), lr=5e-4, weight_decay=1e-4, betas=(0.9, 0.999))

    def poly_decay(epoch):
        # Polynomial LR decay: (1 - epoch/num_epochs)^0.9
        return pow((1 - (epoch / args.num_epochs)), 0.9)

    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=poly_decay)
    return train(args, model, enc, criterion, optimizer, scheduler, epoch_callback_before)
def train(args, model, enc, criterion, optimizer, scheduler, epoch_callback_before=None):
    """Main epoch loop: train, validate, log, checkpoint, track best IoU.

    Returns the trained model.  With ``args.resume`` the loop continues
    from the saved checkpoint (epoch counter and best score included).
    """
    max_iou = 0
    loader_train, loader_val = get_DataLoaders(args, enc)
    print(f'Dataset size train: {len(loader_train.dataset)}, val: {len(loader_val.dataset)}')
    savedir = args.savedir
    logger = AutomatedLogger(savedir, enc)
    start_epoch = 1
    if args.resume:
        max_iou, start_epoch = load_train_resume(enc, model, optimizer, savedir, scheduler)
    datasetMeta = get_dataset_metadata(args.dataset)
    for epoch in range(start_epoch, args.num_epochs + 1):
        if epoch_callback_before:
            epoch_callback_before(epoch, model)
        # Report the LR of each param group; the last group's LR is logged.
        curr_lr = 0
        for param_group in optimizer.param_groups:
            curr_lr = float(param_group['lr'])
            print("CURRENT LR: ", param_group['lr'])
        # IoU meters are optional (computing train IoU slows the epoch).
        iouTrain = IoUMeter(datasetMeta.NUM_CLASSES, datasetMeta.IGNORE_INDEX) if args.train_iou else None
        average_epoch_loss_train = do_train_epoch(args, model, optimizer, scheduler, criterion, loader_train, enc, epoch, iouTrain)
        iouVal = IoUMeter(datasetMeta.NUM_CLASSES, datasetMeta.IGNORE_INDEX) if args.val_iou else None
        average_epoch_loss_val = do_val_epoch(args, model, criterion, loader_val, enc, epoch, iouVal)
        logger.write(epoch, average_epoch_loss_train, average_epoch_loss_val, iouTrain.getIoU()[0] if iouTrain else 0, iouVal.getIoU()[0] if iouVal else 0, curr_lr, 0)
        # Without a val IoU meter, use negative val loss so that "greater
        # is better" still holds for best-model tracking.
        if iouVal is None:
            curr_iou = -average_epoch_loss_val
        else:
            curr_iou = iouVal.getIoU()[0]
        is_best = curr_iou > max_iou
        max_iou = max(curr_iou, max_iou)
        save_train_state(args, max_iou, enc, epoch, is_best, model, optimizer, savedir, scheduler)
        if is_best:
            # Record the best epoch in a human-readable marker file.
            msg = f'max val iou={iouVal.getIoU()[0] * 100 if iouVal else 0:.2f}, in epoch {epoch}'
            if (not enc):
                with open(savedir + "/best.txt", "w") as f:
                    f.write(msg)
            else:
                with open(savedir + "/best_encoder.txt", "w") as f:
                    f.write(msg)
    return model
def do_train_epoch(args, model, optimizer, scheduler, criterion, loader: DataLoader, enc: bool, epoch: int, IoU: IoUMeter = None):
    """Run one training epoch and return its average loss.

    Gradients are accumulated over ``batch_size / gpu_batch_size`` forward
    passes so a large effective batch fits in GPU memory.
    """
    print(f'======CURRENT EPOCH --- {epoch} --- TRAIN======')
    batch_losses = []
    batch_times = []
    if (IoU):
        IoU.reset()
    # Number of GPU sub-batches that make up one optimizer step.
    effective_to_simulated_batch_ratio = args.batch_size / args.gpu_batch_size
    model.train()
    for step, (images, labels) in enumerate(loader):
        start_time = time.time()
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        avg_loss = 0
        for inputs, targets in zip(images.split(args.gpu_batch_size), labels.split(args.gpu_batch_size)):
            outputs = model(inputs, only_encode=enc)
            # Scale each sub-batch loss so accumulated gradients match a
            # single full-batch step.
            loss = criterion(outputs, targets[:, 0]) / effective_to_simulated_batch_ratio
            avg_loss += loss.item()
            loss.backward()
        optimizer.step()
        batch_losses.append(avg_loss)
        batch_times.append(time.time() - start_time)
        if (IoU):
            # NOTE(review): only the LAST GPU sub-batch of each step is added
            # to the train IoU meter -- confirm this is intentional.
            IoU.addBatch(outputs.max(1)[1].unsqueeze(1).detach(), targets.detach())
        if args.show_loss > 0 and step % args.show_loss == 0:
            avg = sum(batch_losses) / len(batch_losses)
            print(f'loss: {avg:0.4}, epoch: {epoch}, step: {step}, ',
                  f'Avg time/image: {sum(batch_times) / args.batch_size / len(batch_times):.4f}')
    average_epoch_loss = sum(batch_losses) / len(batch_losses)
    if (IoU):
        iou_val, iou_classes = IoU.getIoU()
        print(f'ep {epoch} MEAN IoU on train: {iou_val * 100:.2f}%')
    # Passing the epoch to step() is the legacy scheduler API (deprecated in
    # newer PyTorch); kept as-is to preserve the exact LR sequence.
    scheduler.step(epoch)
    return average_epoch_loss
def do_val_epoch(args, model, criterion, loader: DataLoader, enc: bool, epoch: int, IoU: IoUMeter = None):
    """Run one validation epoch (no gradients) and return its average loss."""
    print(f'======CURRENT EPOCH --- {epoch} --- VALIDATE======')
    model.eval()
    batch_losses = []
    batch_times = []
    if (IoU):
        IoU.reset()
    with torch.no_grad():
        for step, (images, labels) in enumerate(loader):
            start_time = time.time()
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images, only_encode=enc)
            loss = criterion(outputs, labels[:, 0])
            batch_losses.append(loss.item())
            batch_times.append(time.time() - start_time)
            if (IoU):
                IoU.addBatch(outputs.max(1)[1].unsqueeze(1).detach(), labels.detach())
            if args.show_loss > 0 and step % args.show_loss == 0:
                avg = sum(batch_losses) / len(batch_losses)
                print(f'loss: {avg:0.4}, epoch: {epoch}, step: {step}, ',
                      f'Avg time/image: {sum(batch_times) / args.batch_size / len(batch_times):.4f}')
    average_epoch_loss = sum(batch_losses) / len(batch_losses)
    if (IoU):
        iou_val, iou_classes = IoU.getIoU()
        print(f'ep {epoch} MEAN IoU on val: {iou_val * 100:.2f}%')
    return average_epoch_loss
def save_checkpoint(state, is_best, path_checkpoint, path_best):
    """Serialize *state* to the checkpoint path; duplicate as 'best' if flagged."""
    targets = [path_checkpoint] + ([path_best] if is_best else [])
    for target in targets:
        torch.save(state, target)
def main(args):
    """Full training entry point: encoder pre-training, then the full network."""
    if not args.no_benchmark:
        cudnn.benchmark = True
    print(f'cudnn.enabled={cudnn.enabled}, cudnn.benchmark={cudnn.benchmark}')
    start_training = time.time()
    savedir = args.savedir
    if not exists(savedir):
        makedirs(savedir)
    # Record the exact options and source files used for this run.
    with open(savedir + '/opts.txt', "w") as f:
        f.write(str(args))
    datasetMeta = get_dataset_metadata(args.dataset)
    model = ERFNet(datasetMeta.NUM_CLASSES)
    copy_object_sourcefile(model, savedir)
    copyfile(__file__, savedir + '/' + basename(__file__))
    model = torch.nn.DataParallel(model).to(device)
    if args.weights:
        # strict=False allows partial weight files (e.g. encoder only).
        status = model.load_state_dict(torch.load(args.weights), strict=False)
        print(status)
    if (not args.decoder):
        print("-------TRAINING ENC-------")
        model = trainer(args, model, enc=True)
    print("-------TRAINING DEC-------")
    if (not args.weights):
        # Re-wrap the pretrained encoder in a full encoder+decoder network.
        pretrainedEnc = next(model.children()).encoder
        model = ERFNet(datasetMeta.NUM_CLASSES, encoder=pretrainedEnc)
        model = torch.nn.DataParallel(model).to(device)
    trainer(args, model, enc=False)
    training_duration = time.time() - start_training
    minutes, seconds = divmod(int(training_duration), 60)
    hours, minutes = divmod(minutes, 60)
    print(f"Training duration: {hours:02}:{minutes:02}:{seconds:02}")
if __name__ == '__main__':
    # Command-line interface for the training script.
    parser = ArgumentParser()
    parser.add_argument('--weights', help='file with initializing weights')
    parser.add_argument('--show-loss', type=int, default=200)
    parser.add_argument('--epochs-save', type=int, default=0, help='save every n epochs')
    parser.add_argument('--decoder', action='store_true', help='if specified, only train the decoder')
    parser.add_argument('--train-iou', action='store_true', default=False)
    parser.add_argument('--val-iou', action='store_true', default=True)
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--no-benchmark', action='store_true')
    parser.add_argument('--no-pin-memory', action='store_true')
    parser.add_argument('--num-epochs', type=int, default=300)
    parser.add_argument('--num-workers', type=int, default=4)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--gpu-batch-size', type=int, default=None, help='the actual number of images processes simultaneously on GPU. If not specified, defaults to batch-size.')
    parser.add_argument('--savedir', default='resources/save/tests')
    parser.add_argument('--dataset', choices=['cityscapes', 'cityscapes_small', 'v_kitti'], default='v_kitti')
    parser.add_argument('--datalist', default='resources/dataset_lists/full.vkd')
    parser.add_argument('--kth-frame', type=int, help='In training, consider labels for every kth frame', default=4, required=False)
    parsed_args = parser.parse_args()
    # gpu-batch-size defaults to batch-size (no gradient accumulation) and
    # must divide it evenly when given.
    if parsed_args.gpu_batch_size is None:
        parsed_args.gpu_batch_size = parsed_args.batch_size
    assert parsed_args.batch_size % parsed_args.gpu_batch_size == 0
    print(parsed_args)
    main(parsed_args)
|
from __future__ import unicode_literals
from documents.permissions import permission_document_view
from documents.tests.test_views import GenericDocumentViewTestCase
from ..models import Tag
from ..permissions import (
permission_tag_attach, permission_tag_create, permission_tag_delete,
permission_tag_edit, permission_tag_remove, permission_tag_view
)
from .literals import (
TEST_TAG_COLOR, TEST_TAG_COLOR_EDITED, TEST_TAG_LABEL,
TEST_TAG_LABEL_EDITED
)
class TagViewTestCase(GenericDocumentViewTestCase):
    def setUp(self):
        # Create the tag that every test in this case operates on.
        super(TagViewTestCase, self).setUp()
        self.tag = Tag.objects.create(
            color=TEST_TAG_COLOR, label=TEST_TAG_LABEL
        )
    def tearDown(self):
        # Some tests delete the tag themselves; only delete if still saved.
        if self.tag.pk:
            self.tag.delete()
        super(TagViewTestCase, self).tearDown()
    def test_tag_create_view_no_permissions(self):
        # Without permission_tag_create the view must 403 and create nothing.
        self.login_user()
        self.tag.delete()
        self.assertEqual(Tag.objects.count(), 0)
        response = self.post(
            'tags:tag_create', data={
                'label': TEST_TAG_LABEL,
                'color': TEST_TAG_COLOR
            }
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Tag.objects.count(), 0)
    def test_tag_create_view_with_permissions(self):
        # With permission_tag_create the tag is created with label and color.
        self.login_user()
        self.tag.delete()
        self.assertEqual(Tag.objects.count(), 0)
        self.grant(permission_tag_create)
        response = self.post(
            'tags:tag_create', data={
                'label': TEST_TAG_LABEL,
                'color': TEST_TAG_COLOR
            }, follow=True
        )
        self.assertContains(response, text='created', status_code=200)
        self.assertEqual(Tag.objects.count(), 1)
        tag = Tag.objects.first()
        self.assertEqual(tag.label, TEST_TAG_LABEL)
        self.assertEqual(tag.color, TEST_TAG_COLOR)
    def test_tag_delete_view_no_permissions(self):
        # Without permission_tag_delete the view redirects and deletes nothing.
        self.login_user()
        self.assertEqual(Tag.objects.count(), 1)
        response = self.post(
            'tags:tag_delete', args=(self.tag.pk,)
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Tag.objects.count(), 1)
    def test_tag_delete_view_with_permissions(self):
        # With permission_tag_delete the tag is removed.
        self.login_user()
        self.assertEqual(Tag.objects.count(), 1)
        self.grant(permission_tag_delete)
        response = self.post(
            'tags:tag_delete', args=(self.tag.pk,), follow=True
        )
        self.assertContains(response, text='deleted', status_code=200)
        self.assertEqual(Tag.objects.count(), 0)
    def test_tag_multiple_delete_view_no_permissions(self):
        # Bulk delete without permission redirects and deletes nothing.
        self.login_user()
        self.assertEqual(Tag.objects.count(), 1)
        response = self.post(
            'tags:tag_multiple_delete', data={'id_list': self.tag.pk}
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Tag.objects.count(), 1)
    def test_tag_multiple_delete_view_with_permissions(self):
        # Bulk delete with permission removes the selected tag.
        self.login_user()
        self.assertEqual(Tag.objects.count(), 1)
        self.grant(permission_tag_delete)
        response = self.post(
            'tags:tag_multiple_delete', data={'id_list': self.tag.pk},
            follow=True
        )
        self.assertContains(response, text='deleted', status_code=200)
        self.assertEqual(Tag.objects.count(), 0)
    def test_tag_edit_view_no_permissions(self):
        """Without the tag edit permission the edit view returns 403 and the
        tag keeps its original label and color."""
        self.login_user()
        response = self.post(
            'tags:tag_edit', args=(self.tag.pk,), data={
                'label': TEST_TAG_LABEL_EDITED, 'color': TEST_TAG_COLOR_EDITED
            }
        )
        self.assertEqual(response.status_code, 403)
        # Re-fetch from the database to confirm nothing changed.
        tag = Tag.objects.get(pk=self.tag.pk)
        self.assertEqual(tag.label, TEST_TAG_LABEL)
        self.assertEqual(tag.color, TEST_TAG_COLOR)
    def test_tag_edit_view_with_permissions(self):
        """With the tag edit permission the edit view updates label and
        color and the followed redirect page contains 'update'."""
        self.login_user()
        self.grant(permission_tag_edit)
        response = self.post(
            'tags:tag_edit', args=(self.tag.pk,), data={
                'label': TEST_TAG_LABEL_EDITED, 'color': TEST_TAG_COLOR_EDITED
            }, follow=True
        )
        self.assertContains(response, text='update', status_code=200)
        # Re-fetch from the database to confirm persistence.
        tag = Tag.objects.get(pk=self.tag.pk)
        self.assertEqual(tag.label, TEST_TAG_LABEL_EDITED)
        self.assertEqual(tag.color, TEST_TAG_COLOR_EDITED)
    def test_document_tags_widget_no_permissions(self):
        """The document list must not render a tag's label for users who
        lack the tag view permission."""
        self.login_user()
        self.tag.documents.add(self.document)
        response = self.get('documents:document_list')
        self.assertNotContains(response, text=TEST_TAG_LABEL, status_code=200)
    def test_document_tags_widget_with_permissions(self):
        """With both the tag view and document view permissions the document
        list renders the attached tag's label."""
        self.login_user()
        self.tag.documents.add(self.document)
        self.grant(permission_tag_view)
        self.grant(permission_document_view)
        response = self.get('documents:document_list')
        self.assertContains(response, text=TEST_TAG_LABEL, status_code=200)
    def test_document_attach_tag_view_no_permission(self):
        """Without the tag attach permission the attach view redirects and
        no tag is attached to the document."""
        self.login_user()
        self.assertEqual(self.document.tags.count(), 0)
        self.grant(permission_tag_view)
        # NOTE(review): the with-permission variant posts the field as
        # 'tags' (plural); with a rejection expected the key is moot, but
        # confirm which name the form actually uses.
        response = self.post(
            'tags:tag_attach', args=(self.document.pk,), data={
                'tag': self.tag.pk,
                'user': self.user.pk
            }
        )
        # Redirect to previous URL and show warning message about having to
        # select at least one object.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.document.tags.count(), 0)
    def test_document_attach_tag_view_with_permission(self):
        """With the tag attach (and tag view) permissions the attach view
        links the tag to the document."""
        self.login_user()
        self.assertEqual(self.document.tags.count(), 0)
        self.grant(permission_tag_attach)
        # permission_tag_view is needed because the form filters the
        # choices
        self.grant(permission_tag_view)
        response = self.post(
            'tags:tag_attach', args=(self.document.pk,), data={
                'tags': self.tag.pk,
                'user': self.user.pk
            }, follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(
            self.document.tags.all(), (repr(self.tag),)
        )
    def test_document_multiple_attach_tag_view_no_permission(self):
        """Without the tag attach permission the multi-document attach view
        redirects and attaches nothing."""
        self.login_user()
        self.assertEqual(self.document.tags.count(), 0)
        self.grant(permission_tag_view)
        response = self.post(
            'tags:multiple_documents_tag_attach', data={
                'id_list': self.document.pk, 'tags': self.tag.pk,
                'user': self.user.pk
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.document.tags.count(), 0)
    def test_document_multiple_attach_tag_view_with_permission(self):
        """With the tag attach (and tag view) permissions the multi-document
        attach view links the tag to every document in id_list."""
        self.login_user()
        self.assertEqual(self.document.tags.count(), 0)
        self.grant(permission_tag_attach)
        # permission_tag_view is needed because the form filters the
        # choices
        self.grant(permission_tag_view)
        response = self.post(
            'tags:multiple_documents_tag_attach', data={
                'id_list': self.document.pk, 'tags': self.tag.pk,
                'user': self.user.pk
            }, follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(
            self.document.tags.all(), (repr(self.tag),)
        )
    def test_single_document_multiple_tag_remove_view_no_permissions(self):
        """Without the tag remove permission the single-document remove view
        redirects and the tag stays attached."""
        self.login_user()
        self.document.tags.add(self.tag)
        self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
        self.grant(permission_tag_view)
        response = self.post(
            'tags:single_document_multiple_tag_remove',
            args=(self.document.pk,), data={
                'id_list': self.document.pk,
                'tags': self.tag.pk,
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
    def test_single_document_multiple_tag_remove_view_with_permission(self):
        """With the tag remove (and tag view) permissions the single-document
        remove view detaches the tag."""
        self.login_user()
        self.document.tags.add(self.tag)
        self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
        self.grant(permission_tag_remove)
        self.grant(permission_tag_view)
        response = self.post(
            'tags:single_document_multiple_tag_remove',
            args=(self.document.pk,), data={
                'id_list': self.document.pk,
                'tags': self.tag.pk,
            }, follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.document.tags.count(), 0)
    def test_multiple_documents_selection_tag_remove_view_no_permissions(self):
        """Without the tag remove permission the multi-document remove view
        redirects and the tag stays attached."""
        self.login_user()
        self.document.tags.add(self.tag)
        self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
        self.grant(permission_tag_view)
        response = self.post(
            'tags:multiple_documents_selection_tag_remove',
            data={
                'id_list': self.document.pk,
                'tags': self.tag.pk,
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
    def test_multiple_documents_selection_tag_remove_view_with_permission(self):
        """With the tag remove (and tag view) permissions the multi-document
        remove view detaches the tag from the selected documents."""
        self.login_user()
        self.document.tags.add(self.tag)
        self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
        self.grant(permission_tag_remove)
        self.grant(permission_tag_view)
        response = self.post(
            'tags:multiple_documents_selection_tag_remove',
            data={
                'id_list': self.document.pk,
                'tags': self.tag.pk,
            }, follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.document.tags.count(), 0)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
"""Test for python distribution information and metadata handling."""
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime
from errno import ENOENT
import os
from os.path import basename, lexists
from pprint import pprint
import tempfile
from conda.common.compat import odict
from conda.common.path import get_python_site_packages_short_path
from conda.common.pkg_formats.python import (
MetadataWarning, PySpec, PythonDistribution, PythonDistributionMetadata,
PythonEggInfoDistribution, PythonInstalledDistribution, get_default_marker_context,
get_dist_file_from_egg_link, get_site_packages_anchor_files, interpret, norm_package_name,
norm_package_version, parse_specification, pypi_name_to_conda_name, split_spec,
)
from conda.common.url import join_url
import pytest
from test_data.env_metadata import METADATA_VERSION_PATHS
# Helpers
# -----------------------------------------------------------------------------
def _create_test_files(test_files):
"""
Helper method to create files in a folder with fname and given content.
test_files = (
('folder', 'fname', 'content'), # Create a file in folder with content
('', 'fname', 'content'), # Create a file with content
('folder', '', ''), # Create a folder
)
"""
temp_path = tempfile.mkdtemp()
fpaths = []
for folder, fname, content in test_files:
fpath = os.path.join(temp_path, folder, fname)
try:
os.makedirs(os.path.dirname(fpath))
except Exception:
pass
with open(fpath, 'w') as fh:
fh.write(content)
fpaths.append(fpath)
return temp_path, fpaths
def _print_output(*args):
"""Helper function to print output in case of failed tests."""
for arg in args:
print(arg)
print('\n')
# Test module helper functions
# -----------------------------------------------------------------------------
def test_norm_package_name():
    """norm_package_name lowercases names and maps '.'/'_' separators to '-'."""
    cases = (
        (None, ''),
        ('', ''),
        ('pyOpenssl', 'pyopenssl'),
        ('py.Openssl', 'py-openssl'),
        ('py-Openssl', 'py-openssl'),
        ('py_Openssl', 'py-openssl'),
        ('zope.interface', 'zope-interface'),
    )
    for raw, expected in cases:
        result = norm_package_name(raw)
        _print_output(raw, result, expected)
        assert result == expected
def test_pypi_name_to_conda_name():
    """pypi_name_to_conda_name maps known PyPI names to their conda names."""
    cases = (
        (None, ''),
        ('', ''),
        ('graphviz', 'python-graphviz'),
    )
    for raw, expected in cases:
        result = pypi_name_to_conda_name(raw)
        _print_output(raw, result, expected)
        assert result == expected
def test_norm_package_version():
    """norm_package_version strips whitespace and surrounding parentheses."""
    cases = (
        (None, ''),
        ('', ''),
        ('>=2', '>=2'),
        ('(>=2)', '>=2'),
        (' (>=2) ', '>=2'),
        ('>=2,<3', '>=2,<3'),
        ('>=2, <3', '>=2,<3'),
        (' (>=2, <3) ', '>=2,<3'),
    )
    for raw, expected in cases:
        result = norm_package_version(raw)
        _print_output(raw, result, expected)
        assert result == expected
def test_split_spec():
    """split_spec splits on the separator and trims whitespace on both parts."""
    cases = (
        # spec, separator, (spec_start, spec_end)
        ('', ';', ('', '')),
        ('start;end', ';', ('start', 'end')),
        ('start ; end', ';', ('start', 'end')),
        (' start ; end ', ';', ('start', 'end')),
        ('start@end', '@', ('start', 'end')),
        ('start @ end', '@', ('start', 'end')),
        (' start @ end ', '@', ('start', 'end')),
    )
    for spec, separator, expected in cases:
        result = split_spec(spec, separator)
        _print_output(spec, result, expected)
        assert result == expected
def test_parse_specification():
    """parse_specification turns a PEP 508 requirement string into a PySpec.

    PySpec fields are (name, extras, constraints, marker, url). Names are
    normalized (lowercase, '-' separators), version parentheses are stripped
    and whitespace inside constraint lists is removed.
    """
    # Mapping of input requirement string -> expected PySpec.
    test_reqs = {
        '':
            PySpec('', [], '', '', ''),
        'requests':
            PySpec('requests', [], '', '', ''),
        'requests >1.1':
            PySpec('requests', [], '>1.1', '', ''),
        'requests[security]':
            PySpec('requests', ['security'], '', '', ''),
        'requests[security] (>=1.1.0)':
            PySpec('requests', ['security'], '>=1.1.0', '', ''),
        'requests[security]>=1.5.0':
            PySpec('requests', ['security'], '>=1.5.0', '', ''),
        'requests[security] (>=4.5.0) ; something >= 27':
            PySpec('requests', ['security'], '>=4.5.0', 'something >= 27', ''),
        'requests[security]>=3.3.0;something >= 2.7 ':
            PySpec('requests', ['security'], '>=3.3.0', 'something >= 2.7', ''),
        'requests[security]>=3.3.0;something >= 2.7 or something_else == 1':
            PySpec('requests', ['security'], '>=3.3.0', 'something >= 2.7 or something_else == 1', ''),
        'requests[security] >=3.3.0 ; something >= 2.7 or something_else == 1':
            PySpec('requests', ['security'], '>=3.3.0', 'something >= 2.7 or something_else == 1', ''),
        'requests[security] (>=3.3.0) ; something >= 2.7 or something_else == 1':
            PySpec('requests', ['security'], '>=3.3.0', 'something >= 2.7 or something_else == 1', ''),
        'requests[security] (>=3.3.0<4.4) ; something >= 2.7 or something_else == 1':
            PySpec('requests', ['security'], '>=3.3.0<4.4', 'something >= 2.7 or something_else == 1', ''),
        'pyOpenSSL>=0.14':
            PySpec('pyopenssl', [], '>=0.14', '', ''),
        'py.OpenSSL>=0.14':
            PySpec('py-openssl', [], '>=0.14', '', ''),
        'py-OpenSSL>=0.14':
            PySpec('py-openssl', [], '>=0.14', '', ''),
        'py_OpenSSL>=0.14':
            PySpec('py-openssl', [], '>=0.14', '', ''),
        'zope.interface (>3.5.0)':
            PySpec('zope-interface', [], '>3.5.0', '', ''),
        "A":
            PySpec('a', [], '', '', ''),
        "A.B-C_D":
            PySpec('a-b-c-d', [], '', '', ''),
        "aa":
            PySpec('aa', [], '', '', ''),
        "name":
            PySpec('name', [], '', '', ''),
        "name<=1":
            PySpec('name', [], '<=1', '', ''),
        "name>=3":
            PySpec('name', [], '>=3', '', ''),
        "name>=3,<2":
            PySpec('name', [], '>=3,<2', '', ''),
        " name ( >= 3, < 2 ) ":
            PySpec('name', [], '>=3,<2', '', ''),
        "name@http://foo.com":
            PySpec('name', [], '', '', 'http://foo.com'),
        " name [ fred , bar ] ( >= 3 , < 2 ) ":
            PySpec('name', ['fred', 'bar'], '>=3,<2', '', ''),
        " name [fred,bar] ( >= 3 , < 2 ) @ http://foo.com ; python_version=='2.7' ":
            PySpec('name', ['fred', 'bar'], '>=3,<2', "python_version=='2.7'", 'http://foo.com'),
        " name [fred,bar] @ http://foo.com ; python_version=='2.7' ":
            PySpec('name', ['fred', 'bar'], '', "python_version=='2.7'", 'http://foo.com'),
        "name[quux, strange];python_version<'2.7' and platform_version=='2'":
            PySpec('name', ['quux', 'strange'], '', "python_version<'2.7' and platform_version=='2'", ''),
        "name; os_name=='a' or os_name=='b'":
            PySpec('name', [], '', "os_name=='a' or os_name=='b'", ''),
        "name; os_name=='a' and os_name=='b' or os_name=='c'":
            PySpec('name', [], '', "os_name=='a' and os_name=='b' or os_name=='c'", ''),
        "name; os_name=='a' and (os_name=='b' or os_name=='c')":
            PySpec('name', [], '', "os_name=='a' and (os_name=='b' or os_name=='c')", ''),
        " name; os_name=='a' or os_name=='b' and os_name=='c' ":
            PySpec('name', [], '', "os_name=='a' or os_name=='b' and os_name=='c'", ''),
        " name ; (os_name=='a' or os_name=='b') and os_name=='c' ":
            PySpec('name', [], '', "(os_name=='a' or os_name=='b') and os_name=='c'", ''),
        '>=3,<2':
            PySpec('', [], '>=3,<2', '', ''),
        ' ( >=3 , <2 ) ':
            PySpec('', [], '>=3,<2', '', ''),
        '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*':
            PySpec('', [], '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*', '', ''),
        'name>=1.0.0-beta.1,<2.0.0':
            PySpec('name', [], '>=1.0.0.beta.1,<2.0.0', '', ''),
        'name==1.0.0+localhash':
            PySpec('name', [], '==1.0.0+localhash', '', ''),
    }
    for req, expected_req in test_reqs.items():
        parsed_req = parse_specification(req)
        _print_output(req, parsed_req, expected_req)
        assert parsed_req == expected_req
def test_get_site_packages_anchor_files():
    """Only *.dist-info/RECORD, *.egg-info(/PKG-INFO) and *.egg-link entries
    are reported as anchor files; other names are ignored."""
    cases_valid = (
        # dir, filename, content
        ('bar.dist-info', 'RECORD', ''),
        ('foo.egg-info', 'PKG-INFO', ''),
        ('', 'cheese.egg-info', ''),
        ('', 'spam.egg-link', ''),
    )
    cases_invalid = (
        ('a.eggs', 'RECORD', ''),
        ('b.eggs', 'PKG-INFO', ''),
        ('', 'zoom.path', ''),
        ('', 'zoom.pth', ''),
        ('', 'something', ''),
    )
    # Create all test dirs/files under a temp folder and scan it.
    temp_path, _ = _create_test_files(cases_valid + cases_invalid)
    ref_dir = os.path.basename(temp_path)
    outputs = get_site_packages_anchor_files(temp_path, ref_dir)
    # Only the valid entries should come back, as ref_dir-relative paths.
    expected_outputs = {
        '/'.join([ref_dir, folder, fname]).replace('//', '/')
        for folder, fname, _ in cases_valid
    }
    _print_output(outputs, expected_outputs)
    assert sorted(outputs) == sorted(expected_outputs)
def test_get_dist_file_from_egg_link():
    """get_dist_file_from_egg_link resolves an egg-link file (whose content
    is a path) to the PKG-INFO/metadata file it points at, and raises
    EnvironmentError for missing or metadata-less targets."""
    test_files = (
        ('egg1.egg-info', 'PKG-INFO', ''),
    )
    temp_path, fpaths = _create_test_files(test_files)
    # The egg-link content is the folder that holds egg1.egg-info.
    temp_path2, fpaths2 = _create_test_files((('', 'egg1.egg-link', temp_path),))
    output = get_dist_file_from_egg_link(fpaths2[0], '')
    expected_output = fpaths[0]
    _print_output(output, expected_output)
    assert output == expected_output
    # Test not existing path
    temp_path3, fpaths3 = _create_test_files((('', 'egg2.egg-link', '/not-a-path/'),))
    with pytest.raises(EnvironmentError) as exc:
        get_dist_file_from_egg_link(fpaths3[0], '')
    print(exc.value)
    # Test existing path but no valid egg-info files
    temp_path4 = tempfile.mkdtemp()
    temp_path4, fpaths4 = _create_test_files((('', 'egg2.egg-link', temp_path4),))
    with pytest.raises(EnvironmentError) as exc:
        get_dist_file_from_egg_link(fpaths4[0], '')
    print(exc.value)
@pytest.mark.skipif(True, reason="Ask @goanpeca about what this test is looking for.")
def test_get_python_distribution_info():
    """PythonDistribution.init should produce a prefix record whose name
    matches the metadata for each supported anchor-file flavor.

    Currently skipped unconditionally; kept for reference.
    """
    temp_path_egg1, _ = _create_test_files((
        ('', 'bar.egg-info', 'Name: bar\n'),
    ))
    temp_path_egg2, _ = _create_test_files((
        ('lee.egg-info', 'PKG-INFO', 'Name: lee\n'),
    ))
    test_files = (
        # Egg link
        ('', 'boom.egg-link', '/not-a-path/'),
        ('', 'bar.egg-link', temp_path_egg1),
        ('', 'lee.egg-link', temp_path_egg2),
        # Dist info
        ('spam.dist-info', 'METADATA', 'Name: spam\n'),
        ('spam.dist-info', 'RECORD', ''),
        ('spam.dist-info', 'INSTALLER', ''),
        # Egg info
        ('foo.egg-info', 'METADATA', 'Name: foo\n'),
        # Direct file
        ('', 'cheese.egg-info', 'Name: cheese\n'),
    )
    temp_path2, fpaths = _create_test_files(test_files)
    output_names = ['boom', 'bar', 'lee', 'spam', 'spam', 'spam', 'foo', 'cheese']
    for i, fpath in enumerate(fpaths):
        output = PythonDistribution.init(temp_path2, basename(fpath), "1.1")
        output = output.prefix_record
        pprint(output.dump())
        if output:
            # The same assertion was previously duplicated on two
            # consecutive lines; once is enough.
            assert output.name == output_names[i]
        else:
            assert output is None
# Metadata
# -----------------------------------------------------------------------------
def test_metadata_keys():
    """Every metadata header key maps to its lowercased, underscored
    attribute name in both key tables."""
    cls = PythonDistributionMetadata
    for mapping in (cls.SINGLE_USE_KEYS, cls.MULTIPLE_USE_KEYS):
        for header, attribute in mapping.items():
            assert header.lower().replace('-', '_') == attribute
def test_metadata_process_path():
    """_process_path finds the first matching metadata file in a directory,
    accepts a direct file path, and returns None when nothing matches."""
    name = 'META'
    temp_path, fpaths = _create_test_files((('', name, 'Name: eggs\n'),))
    func = PythonDistributionMetadata._process_path
    cases = (
        # (path, candidate metadata filenames, expected result)
        (temp_path, [name], fpaths[0]),        # valid directory
        (temp_path, [], None),                 # valid directory, no candidates
        (temp_path, ['something', name, 'something-else'], fpaths[0]),  # order
        (fpaths[0], [name], fpaths[0]),        # direct file path
    )
    for path, candidates, expected in cases:
        result = func(path, candidates)
        _print_output(result, expected)
        assert result == expected
def test_metadata_read_metadata():
    """_read_metadata parses known header keys into an ordered dict,
    ignores unknown keys, and returns an empty dict for missing files."""
    func = PythonDistributionMetadata._read_metadata
    # Test existing file unknown key
    temp_path, fpaths = _create_test_files((
        ('', 'PKG-INFO', 'Unknown-Key: unknown\n'),
    ))
    output = func(fpaths[0])
    expected_output = odict()
    _print_output(output, expected_output)
    assert output == expected_output
    # Test existing file known key
    temp_path, fpaths = _create_test_files((
        ('', 'PKG-INFO', 'Name: spam\n'),
    ))
    output = func(fpaths[0])
    expected_output = odict(name='spam')
    _print_output(output, expected_output)
    assert output == expected_output
    # Test non existing file
    test_fpath = '/foo/bar/METADATA'
    output = func(test_fpath)
    expected_output = odict()
    _print_output(output, expected_output)
    assert output == expected_output
def test_metadata():
    """End-to-end PythonDistributionMetadata checks against the bundled
    METADATA fixture files (one per metadata spec version)."""
    # Check warnings are raised for None path
    with pytest.warns(MetadataWarning):
        path = PythonDistributionMetadata._process_path(None, [])
    assert path is None
    # Check versions
    for fpath in METADATA_VERSION_PATHS:
        if not lexists(fpath):
            # NOTE(review): pytest.skip() aborts the whole test at the first
            # missing fixture rather than skipping one iteration -- confirm
            # that is intended.
            pytest.skip("test files not found: %s" % fpath)
        meta = PythonDistributionMetadata(fpath)
        a = meta.get_dist_requirements()
        b = meta.get_python_requirements()
        z = meta.get_external_requirements()
        c = meta.get_extra_provides()
        d = meta.get_dist_provides()
        e = meta.get_dist_obsolete()
        f = meta.get_classifiers()
        name = meta.name
        version = meta.version
        _print_output(fpath, meta._data, a, b, c, d, e, f, name, version)
        # Every fixture describes the same example project.
        assert len(meta._data)
        assert name == 'BeagleVote'
        assert version == '1.0a2'
# Python Distributions
# -----------------------------------------------------------------------------
def test_basepydist_parse_requires_file_data():
    """_parse_requires_file_data converts requires.txt-style sections into
    marker-annotated requirement strings."""
    key = 'g'
    test_cases = (
        # (data, (expected requirements, expected extras))
        ('', ([], [])),
        ('foo\n', (['foo'], [])),
        ('foo\n\n[:a == "a"]\nbar\n', (['foo', 'bar; a == "a"'], ['a'])),
        ('foo\n\n[a]\nbar\n', (['foo', 'bar; extra == "a"'], ['a'])),
    )
    func = PythonDistribution._parse_requires_file_data
    for data, (expected_reqs, expected_extras) in test_cases:
        output_reqs, output_extras = func(data, key)
        _print_output(repr(data), output_reqs, frozenset(expected_reqs))
        # NOTE(review): expected_extras is unpacked but never asserted
        # against output_extras -- either assert it or drop it. Confirm the
        # function's actual extras output before adding the assertion.
        assert sorted(list(output_reqs)) == sorted(list(expected_reqs))
def test_basepydist_parse_entries_file_data():
    """_parse_entries_file_data parses entry_points.txt-style INI data into
    an ordered dict of sections, preserving entry-name case."""
    func = PythonDistribution._parse_entries_file_data
    data = '''
[a]
a = cli:main_1

[b.c]
b = cli:MAIN_2

[b.d]
C = cli:MAIN_3
'''
    expected_output = odict()
    expected_output['a'] = odict([('a', 'cli:main_1')])
    expected_output['b.c'] = odict([('b', 'cli:MAIN_2')])
    expected_output['b.d'] = odict([('C', 'cli:MAIN_3')])
    output = func(data)
    _print_output(output, expected_output)
    assert output == expected_output
def test_basepydist_load_requires_provides_file():
    """depends.txt entries load as requirements plus the extras list."""
    temp_path, _ = _create_test_files((('', 'depends.txt', 'foo\n\n[a]\nbar\n'), ))
    dist = PythonEggInfoDistribution(temp_path, "1.8", None)
    expected_reqs, expected_extras = ['foo', 'bar; extra == "a"'], ['a']
    reqs, extras = dist._load_requires_provides_file()
    observed = (sorted(reqs), extras)
    expected = (sorted(expected_reqs), expected_extras)
    _print_output(observed, expected)
    assert observed == expected
def test_dist_get_paths():
    """get_paths parses RECORD-style rows into (path, checksum, size)
    tuples anchored under the site-packages short path."""
    record = 'foo/bar,sha256=1,"45"\nfoo/spam,,\n'
    temp_path, _ = _create_test_files((('', 'SOURCES.txt', record), ))
    sp_dir = get_python_site_packages_short_path("2.7")
    dist = PythonEggInfoDistribution(temp_path, "2.7", None)
    paths = dist.get_paths()
    expected = [
        (join_url(sp_dir, "foo", "bar"), '1', 45),
        (join_url(sp_dir, "foo", "spam"), None, None),
    ]
    _print_output(paths, expected)
    assert paths == expected
def test_dist_get_paths_no_paths():
    """get_paths must raise EnvironmentError when the metadata folder has
    no paths (RECORD/SOURCES/installed-files) file at all."""
    temp_path = tempfile.mkdtemp()
    dist = PythonEggInfoDistribution(temp_path, "2.7", None)
    with pytest.raises(EnvironmentError):
        # Return value intentionally discarded; it was previously bound to
        # an unused local (flake8 F841).
        dist.get_paths()
def test_get_dist_requirements():
    """requires.txt contents surface through get_dist_requirements."""
    temp_path, _ = _create_test_files((
        ('', 'METADATA', 'Name: spam\n'),
        ('', 'requires.txt', 'foo >1.0'),
    ))
    dist = PythonEggInfoDistribution(temp_path, "2.7", None)
    requirements = dist.get_dist_requirements()
    expected = frozenset({'foo >1.0'})
    _print_output(requirements, expected)
    assert requirements == expected
def test_get_extra_provides():
    """Extras declared in requires.txt sections surface through
    get_extra_provides."""
    temp_path, _ = _create_test_files((
        ('', 'METADATA', 'Name: spam\n'),
        ('', 'requires.txt', 'foo >1.0\n[a]\nbar\n'),
    ))
    dist = PythonEggInfoDistribution(temp_path, "2.7", None)
    extras = dist.get_extra_provides()
    expected = ['a']
    _print_output(extras, expected)
    assert extras == expected
def test_get_entry_points():
    """entry_points.txt sections surface through get_entry_points as
    nested ordered dicts."""
    temp_path, _ = _create_test_files((
        ('', 'METADATA', 'Name: spam\n'),
        ('', 'entry_points.txt', '[console_scripts]\ncheese = cli:main\n'),
    ))
    dist = PythonEggInfoDistribution(temp_path, "2.7", None)
    entry_points = dist.get_entry_points()
    expected = odict(console_scripts=odict(cheese='cli:main'))
    _print_output(entry_points, expected)
    assert entry_points == expected
def test_pydist_check_files():
    """PythonInstalledDistribution requires METADATA, RECORD and INSTALLER
    to exist; a missing mandatory file raises with errno ENOENT."""
    temp_path, fpaths = _create_test_files((
        ('', 'METADATA', '1'),
        ('', 'RECORD', '2'),
        ('', 'INSTALLER', '3'),
    ))
    # All mandatory files present: construction succeeds.
    PythonInstalledDistribution(temp_path, "2.7", None)
    # Remove METADATA: construction must now fail.
    os.remove(fpaths[0])
    with pytest.raises(EnvironmentError) as exc:
        PythonInstalledDistribution(temp_path, "2.7", None)
    assert exc.value.errno == ENOENT
def test_python_dist_info():
    """A complete .dist-info triplet yields RECORD paths plus the
    Requires-Python / Requires-External metadata."""
    test_files = (
        ('', 'METADATA', ('Name: zoom\n'
                          'Requires-Python: ==2.7\n'
                          'Requires-External: C\n'
                          )
         ),
        ('', 'RECORD', 'foo/bar,sha256=1,"45"\nfoo/spam,,\n'),
        ('', 'INSTALLER', ''),
    )
    # Test mandatory files found
    temp_path, fpaths = _create_test_files(test_files)
    # NOTE(review): sibling tests construct PythonInstalledDistribution as
    # (path, version, None) while this passes (path, "RECORD", "2.7") --
    # confirm the intended constructor signature.
    dist = PythonInstalledDistribution(temp_path, "RECORD", "2.7")
    paths = dist.get_paths()
    _print_output(paths)
    assert len(paths) == 2
    assert dist.get_python_requirements() == frozenset(['==2.7'])
    assert dist.get_external_requirements() == frozenset(['C'])
def test_python_dist_info_conda_dependencies():
    """Requires-Dist entries gated on python_version markers are included
    only for matching interpreter versions; extras become constrains.

    The three per-version assertion blocks were previously triplicated
    verbatim; they are folded into a single case table.
    """
    test_files = (
        ('', 'METADATA', ('Name: foo\n'
                          'Requires-Python: >2.7,<5.0\n'
                          'Requires-Dist: bar ; python_version == "2.7"\n'
                          'Requires-Dist: spam ; python_version == "4.9"\n'
                          'Provides-Extra: docs\n'
                          'Requires-Dist: cheese >=1.0; extra == "docs"\n'
                          )
         ),
    )
    temp_path, fpaths = _create_test_files(test_files)
    path = os.path.dirname(fpaths[0])
    # (python version, 'bar' expected in depends?, 'spam' expected?)
    cases = (
        ("4.9", False, True),
        ("2.7", True, False),
        ("3.4", False, False),
    )
    for pyver, expect_bar, expect_spam in cases:
        dist = PythonEggInfoDistribution(path, pyver, None)
        depends, constrains = dist.get_conda_dependencies()
        assert 'python %s.*' % pyver in depends
        assert ('bar' in depends) is expect_bar
        assert ('spam' in depends) is expect_spam
        # The extra-gated requirement always lands in constrains.
        assert 'cheese >=1.0' in constrains
def test_python_dist_info_conda_dependencies_2():
    """With no Requires entries only the python dependency is emitted."""
    temp_path, fpaths = _create_test_files(
        (('', 'METADATA', 'Name: foo\n'),)
    )
    metadata_dir = os.path.dirname(fpaths[0])
    dist = PythonEggInfoDistribution(metadata_dir, "4.9", None)
    depends, constrains = dist.get_conda_dependencies()
    assert 'python 4.9.*' in depends
def test_python_dist_info_conda_dependencies_3():
    """Same as _2 but for a python 3.6 target interpreter."""
    temp_path, fpaths = _create_test_files(
        (('', 'METADATA', 'Name: foo\n'),)
    )
    metadata_dir = os.path.dirname(fpaths[0])
    dist = PythonEggInfoDistribution(metadata_dir, "3.6", None)
    depends, constrains = dist.get_conda_dependencies()
    assert "python 3.6.*" in depends
def test_python_dist_egg_path():
    """An installed-files.txt listing yields one path entry per line."""
    temp_path, fpaths = _create_test_files(
        (('', 'installed-files.txt', 'foo/bar\nfoo/spam\n'),)
    )
    metadata_dir = os.path.dirname(fpaths[0])
    dist = PythonEggInfoDistribution(metadata_dir, "2.7", None)
    paths = dist.get_paths()
    _print_output(paths)
    assert len(paths) == 2
def test_python_dist_egg_fpath():
    """A bare *.egg-info file (not a directory) is parsed directly for the
    distribution name and version."""
    temp_path, fpaths = _create_test_files(
        (('', 'zoom.egg-info', 'Name: Zoom\nVersion: 1.0\n'),)
    )
    dist = PythonEggInfoDistribution(fpaths[0], "2.2", None)
    assert (dist.name, dist.norm_name, dist.version) == ('Zoom', 'zoom', '1.0')
# Markers
# -----------------------------------------------------------------------------
def test_evaluate_marker():
    """interpret() evaluates PEP 508 environment markers against a context
    dict; malformed markers or unknown names raise SyntaxError."""
    # See: https://www.python.org/dev/peps/pep-0508/#complete-grammar
    # ((marker_expr, context, extras, expected_output), ...)
    test_cases = (
        # Valid context
        ('spam == "1.0"', {'spam': '1.0'}, True),
        # Should parse as (a and b) or c
        ("a=='a' and b=='b' or c=='c'", {'a': 'a', 'b': 'b', 'c': ''}, True),
        # Overriding precedence -> a and (b or c)
        ("a=='a' and (b=='b' or c=='c')", {'a': 'a', 'b': '', 'c': ''}, None),
        # Overriding precedence -> (a or b) and c
        ("(a=='a' or b=='b') and c=='c'", {'a': 'a', 'b': '', 'c': ''}, None),
    )
    for marker_expr, context, expected_output in test_cases:
        output = None
        if expected_output:
            output = interpret(marker_expr, context)
            assert output is expected_output
        else:
            # NOTE(review): for the expected-None cases the result is
            # computed and printed but never asserted -- confirm the
            # intended expected values and assert them.
            output = interpret(marker_expr, context)
        _print_output(marker_expr, context, output, expected_output)
    # Test cases syntax error
    test_cases = (
        ('spam == "1.0"', {}, None),
        ('spam2 == "1.0"', {'spam': '1.0'}, None),
        # Malformed
        ('spam2 = "1.0"', {'spam': '1.0'}, None),
    )
    for marker_expr, context, expected_output in test_cases:
        output = None
        with pytest.raises(SyntaxError):
            output = interpret(marker_expr, context)
def test_get_default_marker_context():
    """Deprecated dotted marker keys must mirror their underscore variants."""
    context = get_default_marker_context()
    for key, value in context.items():
        if '.' not in key:
            continue
        # The dotted (deprecated) spelling and the underscore spelling must
        # resolve to the same value.
        replacement = context.get(key.replace('.', '_'))
        _print_output(value, replacement)
        assert value == replacement
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import six
from .topping import Topping
from jawa.constants import ConstantClass, String
from burger.util import class_from_invokedynamic
class TileEntityTopping(Topping):
    """Gets tile entity (block entity) types.

    Works by disassembling obfuscated Minecraft class files (via jawa) to
    recover TE registrations, the blocks that own them, and the network ids
    used by the client update packet.
    """
    PROVIDES = [
        "identify.tileentity.list",
        "tileentities.list",
        "tileentities.tags",
        "tileentities.networkids"
    ]
    DEPENDS = [
        "identify.tileentity.superclass",
        "identify.block.superclass",
        "packets.classes",
        "blocks"
    ]
    @staticmethod
    def act(aggregate, classloader, verbose=False):
        """Entry point: fill aggregate['tileentity'] in three passes."""
        if "tileentity.superclass" not in aggregate["classes"]:
            if verbose:
                print("Missing tileentity.superclass")
            return
        TileEntityTopping.identify_block_entities(aggregate, classloader, verbose)
        TileEntityTopping.identify_associated_blocks(aggregate, classloader, verbose)
        TileEntityTopping.identify_network_ids(aggregate, classloader, verbose)
    @staticmethod
    def identify_block_entities(aggregate, classloader, verbose):
        """Walk the static initializer of the TE registration class and
        record each registered (name, class) pair."""
        te = aggregate.setdefault("tileentity", {})
        superclass = aggregate["classes"]["tileentity.superclass"]
        cf = classloader[superclass]
        # First, figure out whether this is a version where the TE superclass
        # is also the TE list.
        if cf.constants.find_one(String, lambda c: c.string.value in ('daylight_detector', 'DLDetector')):
            # Yes, it is
            listclass = superclass
        else:
            # It isn't, but we can figure it out by looking at the constructor's only parameter.
            method = cf.methods.find_one(name="<init>")
            assert len(method.args) == 1
            listclass = method.args[0].name
            cf = classloader[listclass]
        aggregate["classes"]["tileentity.list"] = listclass
        # All registrations happen in the class initializer.
        method = cf.methods.find_one(name="<clinit>")
        tileentities = te.setdefault("tileentities", {})
        te_classes = te.setdefault("classes", {})
        # Accumulates the name/class constants seen since the last
        # registration call.
        tmp = {}
        for ins in method.code.disassemble():
            if ins in ("ldc", "ldc_w"):
                const = ins.operands[0]
                if isinstance(const, ConstantClass):
                    # Used before 1.13
                    tmp["class"] = const.name.value
                elif isinstance(const, String):
                    tmp["name"] = const.string.value
            elif ins == "invokedynamic":
                # Used after 1.13
                tmp["class"] = class_from_invokedynamic(ins, cf)
            elif ins == "invokestatic":
                # Registration call: flush the accumulated pair, if complete.
                if "class" in tmp and "name" in tmp:
                    tmp["blocks"] = []
                    tileentities[tmp["name"]] = tmp
                    te_classes[tmp["class"]] = tmp["name"]
                tmp = {}
    @staticmethod
    def identify_associated_blocks(aggregate, classloader, verbose):
        """Determine, for every block, whether it owns a block entity and
        which one, by tracing the createNewTileEntity class hierarchy."""
        te = aggregate["tileentity"]
        tileentities = te["tileentities"]
        te_classes = te["classes"]
        blocks = aggregate["blocks"]["block"]
        # Brewing stands are a fairly simple block entity with a clear hierarchy
        brewing_stand = blocks["brewing_stand"]
        cf = classloader[brewing_stand["class"]]
        blockcontainer = cf.super_.name.value
        cf = classloader[blockcontainer]
        assert len(cf.interfaces) == 1
        tileentityprovider = cf.interfaces[0].name.value
        cf = classloader[tileentityprovider]
        # The provider interface has exactly one method: createNewTileEntity.
        methods = list(cf.methods.find())
        assert len(methods) == 1
        create_te_name = methods[0].name.value
        create_te_desc = methods[0].descriptor.value
        # Memoized "does this class have a block entity?" lookup, seeded with
        # the two known roots.
        has_be_by_class = {}
        has_be_by_class[blockcontainer] = True
        has_be_by_class[aggregate["classes"]["block.superclass"]] = False
        def has_be(cls):
            # Recursive hierarchy walk with memoization.
            if cls in has_be_by_class:
                return has_be_by_class[cls]
            cf = classloader[cls]
            if has_be(cf.super_.name.value):
                has_be_by_class[cls] = True
                return True
            for interface in cf.interfaces:
                # Final case: if it implements the interface but doesn't directly
                # extend BlockContainer, it's still a TE
                if interface.name.value == tileentityprovider:
                    has_be_by_class[cls] = True
                    return True
            return False
        blocks_with_be = []
        for block in six.itervalues(blocks):
            if has_be(block["class"]):
                blocks_with_be.append(block)
        # OK, we've identified all blocks that have block entities...
        # now figure out which one each one actually has
        for block in blocks_with_be:
            # Find the createNewTileEntity method.
            # However, it might actually be in a parent class, so loop until it's found
            cls = block["class"]
            create_te = None
            while not create_te:
                cf = classloader[cls]
                cls = cf.super_.name.value
                create_te = cf.methods.find_one(f=lambda m: m.name == create_te_name and m.descriptor == create_te_desc)
            for ins in create_te.code.disassemble():
                if ins.mnemonic == "new":
                    # The first instantiated class is the block's TE type.
                    const = ins.operands[0]
                    te_name = te_classes[const.name.value]
                    block["block_entity"] = te_name
                    tileentities[te_name]["blocks"].append(block["text_id"])
                    break
    @staticmethod
    def identify_network_ids(aggregate, classloader, verbose):
        """Recover the numeric network id of each TE type from the client
        handler of the update-tile-entity packet."""
        te = aggregate["tileentity"]
        tileentities = te["tileentities"]
        te_classes = te["classes"]
        nbt_tag_type = "L" + aggregate["classes"]["nbtcompound"] + ";"
        if "nethandler.client" in aggregate["classes"]:
            updatepacket = None
            for packet in six.itervalues(aggregate["packets"]["packet"]):
                if (packet["direction"] != "CLIENTBOUND" or
                        packet["state"] != "PLAY"):
                    continue
                packet_cf = classloader[packet["class"][:-len(".class")]] # XXX should we be including the .class suffix in the packet class if we just trim it everywhere we use it?
                # Check if the packet has the expected fields in the class file
                # for the update tile entity packet
                if (len(packet_cf.fields) >= 3 and
                        # Tile entity type int, at least (maybe also position)
                        len(list(packet_cf.fields.find(type_="I"))) >= 1 and
                        # New NBT tag
                        len(list(packet_cf.fields.find(type_=nbt_tag_type))) >= 1):
                    # There are other fields, but they vary by version.
                    updatepacket = packet
                    break
            if not updatepacket:
                if verbose:
                    print("Failed to identify update tile entity packet")
                return
            te["update_packet"] = updatepacket
            nethandler = aggregate["classes"]["nethandler.client"]
            nethandler_cf = classloader[nethandler]
            updatepacket_name = updatepacket["class"].replace(".class", "")
            # The handler method takes the update packet as its sole argument.
            method = nethandler_cf.methods.find_one(
                    args="L" + updatepacket_name + ";")
            value = None
            for ins in method.code.disassemble():
                if ins in ("bipush", "sipush"):
                    # Candidate network id pushed before the instanceof check.
                    value = ins.operands[0].value
                elif ins == "instanceof":
                    if value is None:
                        # Ensure the command block callback is not counted
                        continue
                    const = ins.operands[0]
                    te_name = te_classes[const.name.value]
                    tileentities[te_name]["network_id"] = value
                    value = None
|
from collections import namedtuple
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from torch import jit
from typing import NamedTuple, List, Optional, Dict, Tuple, Any
from jit.test_module_interface import TestModuleInterface # noqa: F401
import unittest
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
import types
class TestScriptPy3(JitTestCase):
    """TorchScript tests for Python-3-only surface area: f-strings, PEP 526
    annotations, NamedTuple classes, keyword-only arguments, typed dicts,
    futures, and module interfaces."""

    def test_joined_str(self):
        # f-string support in the AST frontend (string frontend lacks it).
        def func(x):
            hello, test = "Hello", "test"
            print(f"{hello + ' ' + test}, I'm a {test}") # noqa E999
            print(f"format blank") # noqa F541
            hi = 'hi'
            print(f"stuff before {hi}")
            print(f"{hi} stuff after")
            return x + 1

        x = torch.arange(4., requires_grad=True)
        # TODO: Add support for f-strings in string parser frontend
        # self.checkScript(func, [x], optimize=True, capture_output=True)

        with self.capture_stdout() as captured:
            out = func(x)

        scripted = torch.jit.script(func)
        with self.capture_stdout() as captured_script:
            # NOTE(review): this invokes the eager `func` again rather than
            # `scripted`, so the scripted path never runs — presumably meant
            # to be `scripted(x)`; verify before changing.
            out_script = func(x)

        self.assertEqual(out, out_script)
        self.assertEqual(captured, captured_script)

    @unittest.skipIf(sys.version_info[:2] < (3, 7), "`dataclasses` module not present on < 3.7")
    def test_dataclass_error(self):
        # Scripting a dataclass method must fail with a source-lookup error
        # naming the class (generated __init__ has no retrievable source).
        from dataclasses import dataclass

        @dataclass
        class NormalizationInfo(object):
            mean: float = 0.0

            def compute(self, total_rows):
                return self.mean

        def fn():
            return NormalizationInfo(1, 2, 3, 4, 5)

        with self.assertRaisesRegex(OSError, "NormalizationInfo"):
            torch.jit.script(fn)

    def test_optional_dict_construct(self):
        # Dict[str, Optional[Tensor]] literals must type-check when values
        # are plain Tensors.
        class M(torch.nn.Module):
            def use(self, buffer: Dict[str, Optional[torch.Tensor]]):
                return buffer["prev_key"]

            def forward(self, x):
                prev_key = torch.rand(2, 3)
                next_key = torch.rand(2, 3)
                saved_state: Dict[str, Optional[torch.Tensor]] = {
                    "prev_key": prev_key,
                    "next_key": next_key,
                }
                return self.use(saved_state)

        self.checkModule(M(), (torch.rand(2, 2),))

    def test_kwarg_support(self):
        # Keyword-only params are supported, but only when every one has no
        # default or callers pass them; a bare `*` with defaults is rejected.
        with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
            class M(torch.nn.Module):
                def forward(self, *, n_tokens: int, device_name: str = 2):
                    pass
            torch.jit.script(M())

        class M(torch.nn.Module):
            def forward(self, *, n_tokens: int, device_name: str):
                return n_tokens, device_name

        sm = torch.jit.script(M())

        with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
            sm()

        input = (3, 'hello')
        self.assertEqual(sm(*input), input)

    def test_named_tuple(self):
        # NamedTuple attribute access and iteration inside a scripted fn.
        class FeatureVector(NamedTuple):
            float_features: float
            sequence_features: List[float]
            time_since_first: float

        @torch.jit.script
        def foo(x) -> float:
            fv = FeatureVector(3.0, [3.0], 3.0) # noqa
            rv = fv.float_features
            for val in fv.sequence_features:
                rv += val
            rv *= fv.time_since_first
            return rv

        self.assertEqual(foo(torch.rand(3, 4)), 18.0)

    def test_named_tuple_constant(self):
        class Tup(NamedTuple):
            a: int
            b: int

        @torch.jit.script
        def foo():
            return Tup(1, 2)

        self.assertEqual(foo(), Tup(1, 2))

    def test_dict_preserves_order(self):
        # Scripted dicts must preserve insertion order like Python dicts.
        def dict_ordering():
            a : Dict[int, int] = {}
            for i in range(1000):
                a[i] = i + 1
            return a

        self.checkScript(dict_ordering, ())
        di = torch.jit.script(dict_ordering)()
        res = list(di.items())
        for i in range(1000):
            key, value = res[i]
            self.assertTrue(key == i and value == i + 1)

    def test_list_unification_hint(self):
        with self.assertRaisesRegex(RuntimeError, "Expected a List type hint"):
            @torch.jit.script
            def x():
                b : int = [2, 3]
                return b

    def test_return_named_tuple(self):
        class FeatureVector(NamedTuple):
            float_features: float
            sequence_features: List[float]
            time_since_first: float

        @torch.jit.script
        def foo(x):
            fv = FeatureVector(3.0, [3.0], 3.0)
            return fv

        out = foo(torch.rand(3, 4))
        # Called twice: exercises caching of the returned NamedTuple type.
        out = foo(torch.rand(3, 4))
        self.assertEqual(out.float_features, 3.0)
        self.assertEqual(out.sequence_features, [3.0])
        self.assertEqual(out.time_since_first, 3.0)

    def test_named_tuple_as_attr(self):
        class Config(NamedTuple):
            size: int

        class MyMod(nn.Module):
            configs: Dict[int, Config]

            def __init__(self, configs):
                super().__init__()
                self.configs = configs

            def forward(self, x):
                for _id, config in self.configs.items():
                    x += config.size
                return x

        s = torch.jit.script(MyMod({0: Config(size=16)}))

    def test_types_as_values(self):
        # Types used as values (device, NamedTuple classes through @ignore).
        def fn(m: torch.Tensor) -> torch.device:
            return m.device

        self.checkScript(fn, [torch.randn(2, 2)])

        GG = namedtuple('GG', ['f', 'g'])

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()

            @torch.jit.ignore
            def foo(self, x, z):
                # type: (Tensor, Tensor) -> Tuple[GG, GG]
                return GG(x, z), GG(x, z)

            def forward(self, x, z):
                return self.foo(x, z)

        foo = torch.jit.script(Foo())
        y = foo(torch.randn(2, 2), torch.randn(2, 2))

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()

            @torch.jit.ignore
            def foo(self, x, z) -> Tuple[GG, GG]:
                return GG(x, z)

            def forward(self, x, z):
                return self.foo(x, z)

        foo = torch.jit.script(Foo())
        y = foo(torch.randn(2, 2), torch.randn(2, 2))

    def test_named_tuple_resolution(self):
        # NamedTuple reached through a module __getattr__ must resolve.
        class TheType(NamedTuple):
            t: int

        class MyModule(types.ModuleType):
            def __init__(self):
                super(MyModule, self).__init__('MyModule')

            def __getattr__(self, attr):
                return TheType

        some_module = MyModule()

        def fn() -> some_module.Type:
            return some_module.Type(1)

        self.checkScript(fn, [])

    def test_ignore_with_types(self):
        @torch.jit.ignore
        def fn(x: Dict[str, Optional[torch.Tensor]]):
            return x + 10

        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()

            def forward(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> torch.Tensor:
                self.dropout_modality(in_batch)
                fn(in_batch)
                return torch.tensor(1)

            @torch.jit.ignore
            def dropout_modality(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> Dict[str, Optional[torch.Tensor]]:
                return in_batch

        sm = torch.jit.script(M())
        FileCheck().check("dropout_modality").check("in_batch").run(str(sm.graph))

    def test_python_callable(self):
        class MyPythonClass(object):
            @torch.jit.ignore
            def __call__(self, *args) -> str:
                return str(type(args[0]))

        the_class = MyPythonClass()

        @torch.jit.script
        def fn(x):
            return the_class(x)

        # This doesn't involve the string frontend, so don't use checkScript
        x = torch.ones(2)
        self.assertEqual(fn(x), the_class(x))

    def test_bad_types(self):
        @torch.jit.ignore
        def fn(my_arg):
            return my_arg + 10

        with self.assertRaisesRegex(RuntimeError, "argument 'my_arg'"):
            @torch.jit.script
            def other_fn(x):
                return fn('2')

    def test_named_tuple_slice_unpack(self):
        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int]

        @torch.jit.script
        def foo(a : int, b : float, c : List[int]):
            tup = MyCoolNamedTuple(a, b, c) # noqa
            my_a, my_b, my_c = tup
            return tup[:1], my_a, my_c

        self.assertEqual(foo(3, 3.5, [6]), ((3,), 3, [6]))

    def test_named_tuple_lower(self):
        # The lower-all-tuples pass must eliminate NamedTuple constructs.
        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int]

        @torch.jit.script
        def foo(a : int):
            tup = MyCoolNamedTuple(a, 3.14, [9]) # noqa
            return tup

        FileCheck().check('TupleConstruct').run(foo.graph)
        torch._C._jit_pass_lower_all_tuples(foo.graph)
        FileCheck().check_not('TupleConstruct').run(foo.graph)

    def test_named_tuple_type_annotation(self):
        global MyCoolNamedTuple  # see [local resolution in python]

        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int]

        @torch.jit.script
        def foo(x : MyCoolNamedTuple) -> MyCoolNamedTuple:
            return x

        mnt = MyCoolNamedTuple(42, 420.0, [666])
        self.assertEqual(foo(mnt), mnt)

    def test_named_tuple_wrong_types(self):
        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int]

        with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int' for argument 'a'"
                                                  " but instead found type 'str'"):
            @torch.jit.script
            def foo():
                tup = MyCoolNamedTuple('foo', 'bar', 'baz') # noqa
                return tup

    def test_named_tuple_kwarg_construct(self):
        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int]

        @torch.jit.script
        def foo():
            tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa
            return tup

        tup = foo()
        self.assertEqual(tup.a, 9)
        self.assertEqual(tup.b, 3.5)
        self.assertEqual(tup.c, [1, 2, 3])

    def test_named_tuple_default_error(self):
        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int] = [3, 4, 5]

        with self.assertRaisesRegex(RuntimeError, 'Default values are currently not supported'):
            @torch.jit.script
            def foo():
                tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa
                return tup

    @unittest.skipIf(True, "broken while these tests were not in CI")
    def test_named_tuple_serialization(self):
        class MyCoolNamedTuple(NamedTuple):
            a : int
            b : float
            c : List[int]

        class MyMod(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self):
                return MyCoolNamedTuple(3, 3.5, [3, 4, 5])

        mm = MyMod()
        mm.save('foo.zip')
        # Clear registered classes so loading re-creates the tuple type.
        torch.testing._internal.jit_utils.clear_class_registry()
        loaded = torch.jit.load('foo.zip')

        out = mm()
        out_loaded = loaded()

        for name in ['a', 'b', 'c']:
            self.assertEqual(getattr(out_loaded, name), getattr(out, name))

    def test_type_annotate_py3(self):
        # PEP 526 variable annotations, including Optional reassignment.
        def fn():
            a : List[int] = []
            b : torch.Tensor = torch.ones(2, 2)
            c : Optional[torch.Tensor] = None
            d : Optional[torch.Tensor] = torch.ones(3, 4)
            for _ in range(10):
                a.append(4)
                c = torch.ones(2, 2)
            d = None
            return a, b, c, d

        self.checkScript(fn, ())

        def wrong_type():
            wrong : List[int] = [0.5]
            return wrong

        with self.assertRaisesRegex(RuntimeError, "Lists must contain only a single type"):
            torch.jit.script(wrong_type)

    def test_subexpression_List_Future(self):
        @torch.jit.script
        def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
            return x[0]

        FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)

    def test_subexpression_Future_annotate(self):
        @torch.jit.script
        def fn() -> torch.jit.Future[int]:
            x: List[torch.jit.Future[int]] = []
            return x[0]

        FileCheck().check("Future[int][]").run(fn.graph)

    def test_future_isinstance(self):
        @torch.jit.script
        def fn(x: Any) -> torch.jit.Future[int]:
            assert isinstance(x, jit.Future[int])
            return x

        FileCheck().check("Future[int]").run(fn.graph)

    def test_subexpression_Tuple_int_int_Future(self):
        @torch.jit.script
        def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
            return x[0], x[2]

        FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)

    def test_subexpression_Dict_int_Future(self):
        @torch.jit.script
        def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
            return x[y]

        FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)

    def test_subexpression_Optional(self):
        @torch.jit.script
        def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
            if x is not None:
                return x[0]
            else:
                return None

        FileCheck().check('Dict(int, Future(int))?').run(fn.graph)

    def test_unimported_type_resolution(self):
        # verify fallback from the python resolver to the c++ resolver
        @ torch.jit.script
        def fn(x):
            # type: (number) -> number
            return x + 1

        FileCheck().check('Scalar').run(fn.graph)

    def test_parser_bug(self):
        # Regression check: Optional annotation in a signature must parse.
        def parser_bug(o: Optional[torch.Tensor]):
            pass

    def test_mismatched_annotation(self):
        with self.assertRaisesRegex(RuntimeError, 'annotated with type'):
            @torch.jit.script
            def foo():
                x : str = 4
                return x

    def test_reannotate(self):
        with self.assertRaisesRegex(RuntimeError, 'declare and annotate'):
            @torch.jit.script
            def foo():
                x = 5
                if True:
                    x : Optional[int] = 7

    def test_module_inplace_construct(self):
        # _reconstruct replaces a scripted module's internals in place.
        class M(nn.Module):
            def __init__(self, start: int):
                super().__init__()
                self.linear = nn.Linear(3, 3)
                self.attribute = start
                self.parameter = nn.Parameter(torch.tensor(3, dtype=torch.float))

            def method(self) -> int:
                return self.attribute

            @torch.jit.unused
            def unused_method(self):
                return self.attribute + self.attribute

            def forward(self, x):
                return self.linear(self.linear(x))

        class N(nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = nn.Linear(4, 4)

            @torch.jit.ignore
            def ignored_method(self, x):
                return x

            def forward(self, x):
                return self.linear(x)

        m = torch.jit.script(M(3))
        n = torch.jit.script(N())

        n._reconstruct(m._c)

        inp = torch.rand((3))

        # Check that both modules produce the same output.
        with torch.no_grad():
            m_out = m(inp)
            n_out = n(inp)
            self.assertEqual(m_out, n_out)

        # Check that ignored method is still intact.
        self.assertEqual(inp, n.ignored_method(inp))

    def test_export_opnames_interface(self):
        global OneTwoModule

        @torch.jit.interface
        class OneTwoModule(nn.Module):
            def one(self, x, y):
                # type: (Tensor, Tensor) -> Tensor
                pass

            def two(self, x):
                # type: (Tensor) -> Tensor
                pass

            def forward(self, x):
                # type: (Tensor) -> Tensor
                pass

        class FooMod(nn.Module):
            def one(self, x, y):
                # type: (Tensor, Tensor) -> Tensor
                return x + y

            def two(self, x):
                # type: (Tensor) -> Tensor
                return 2 * x

            def forward(self, x):
                # type: (Tensor) -> Tensor
                return self.one(self.two(x), x)

        class BarMod(nn.Module):
            def one(self, x, y):
                # type: (Tensor, Tensor) -> Tensor
                return x * y

            def two(self, x):
                # type: (Tensor) -> Tensor
                return 2 / x

            def forward(self, x):
                # type: (Tensor) -> Tensor
                return self.two(self.one(x, x))

        class M(nn.Module):
            sub : OneTwoModule

            def __init__(self):
                super(M, self).__init__()
                self.sub = BarMod()

            def forward(self, x):
                # type: (Tensor) -> Tensor
                return self.sub.forward(x)

        def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
            return mod_list[0].forward(x) + mod_list[1].forward(x)

        scripted_M_mod = torch.jit.script(M())
        # Temporarily test empty output because lite interpreter does not support interface call
        # Replace it with the issubset call when interface call is supported.
        self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)
        # self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
        #     set(torch.jit.export_opnames(scripted_M_mod))))

        scripted_M_mod.sub = torch.jit.script(FooMod())
        self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)
        # self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
        #     set(torch.jit.export_opnames(scripted_M_mod))))
if __name__ == '__main__':
    # PyTorch's shared test entry point (from common_utils).
    run_tests()
|
"""
Given a collection of distinct numbers, return all possible permutations.
For example,
[1,2,3] have the following permutations:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
"""
class Solution(object):
    """Recursive enumeration of all permutations of distinct numbers."""

    def permute(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Base case: zero or one element has exactly one permutation.
        if len(nums) <= 1:
            return [nums]
        results = []
        for idx in range(len(nums)):
            head = nums[idx]
            remainder = nums[:idx] + nums[idx + 1:]
            # Prefix `head` onto every permutation of the remaining elements.
            for tail in self.permute(remainder):
                results.append([head] + tail)
        return results
# Lightweight smoke test — note this runs on import, not only as a script.
a = Solution()
assert a.permute([1,2,3]) == [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]
|
from django.views import generic
from .models import (Product, LatestProducts,
LaptopsCategory, SmartPhonesCategory)
from c_user.models import User
from django.contrib.auth.views import LoginView
from .forms import UserLoginForm, RegisterForm, UserProfileUpdateForm
from django.shortcuts import redirect, render
class ProductsView(generic.ListView):
    """Landing page showing the six most recent products across categories."""
    template_name = 'products/main_page.html'
    model = Product

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        latest = LatestProducts.objects.get_last_products(
            'laptopscategory', 'smartphonescategory')
        context['products'] = latest[:6]
        return context
class ProductDetailView(generic.DetailView):
    # Detail page for a single laptop product.
    template_name = 'products/detail_laptop.html'
    model = LaptopsCategory
class ProductSmartphonesDetailView(generic.DetailView):
    # Detail page for a single smartphone product.
    template_name = 'products/detail_smart.html'
    model = SmartPhonesCategory
# Filter (per-category product listing views)
class LaptopsProductView(generic.ListView):
    """All laptops, newest first."""
    template_name = 'products/laptops.html'
    model = Product

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        laptops = LaptopsCategory.objects.all().order_by('-id')
        context['laptops'] = laptops
        return context
class SmartphonesProductView(generic.ListView):
    """All smartphones, newest first."""
    template_name = 'products/smartphones.html'
    model = Product

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        smartphones = SmartPhonesCategory.objects.all().order_by('-id')
        context['smartphones'] = smartphones
        return context
# Authentication / account views
class UserLoginView(LoginView):
    # Login page using the project's custom authentication form.
    template_name = 'account/login.html'
    form_class = UserLoginForm
class RegisterView(generic.CreateView):
    # Sign-up form backed by the custom User model.
    # NOTE(review): no success_url set here — confirm the User model defines
    # get_absolute_url, otherwise submitting the form raises ImproperlyConfigured.
    template_name = 'account/register.html'
    form_class = RegisterForm
    model = User
class UserProfileView(generic.DetailView):
    # Read-only profile page for a single user.
    template_name = 'account/user.html'
    model = User
class UserProfileUpdateView(generic.UpdateView):
    # Profile editing form for an existing user.
    template_name = 'account/update_user.html'
    model = User
    form_class = UserProfileUpdateForm
|
def is_isogram(string):
    """Return True when *string* contains no repeated characters.

    The check is case-insensitive, and underscores, hyphens and spaces are
    ignored (they may repeat freely).
    """
    relevant = [ch for ch in string.lower() if ch not in '_- ']
    # No repeats iff deduplicating changes nothing.
    return len(relevant) == len(set(relevant))
|
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2018 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import os.path
import importlib
import logging
from typing import Iterable, Union
PathDelimiter = '\\'
def importer_for_format(format):
    """Resolve and return the Importer class for the given format name.

    Raises Exception when the format module exists but exposes no Importer.
    """
    module = importlib.import_module('keepercommander.importer.' + format)
    if not hasattr(module, 'Importer'):
        raise Exception('Cannot resolve importer for format {}'.format(format))
    return module.Importer
def exporter_for_format(format):
    """Resolve and return the Exporter class for the given format name.

    Raises Exception when the format module exists but exposes no Exporter.
    """
    module = importlib.import_module('keepercommander.importer.' + format)
    if not hasattr(module, 'Exporter'):
        raise Exception('Cannot resolve exporter for format {}'.format(format))
    return module.Exporter
def strip_path_delimiter(name, delimiter=PathDelimiter):
    """Strip at most one unescaped leading/trailing delimiter from *name*.

    A doubled delimiter is an escape for a literal character and is left
    untouched. A name that is exactly one delimiter becomes ''.
    """
    folder = name.strip()
    if folder == delimiter:
        return ''
    if len(folder) > 1:
        leading_single = folder[:1] == delimiter and folder[:2] != delimiter * 2
        if leading_single:
            folder = folder[1:].strip()
    if len(folder) > 1:
        trailing_single = folder[-1:] == delimiter and folder[-2:] != delimiter * 2
        if trailing_single:
            folder = folder[:-1].strip()
    return folder
def path_components(path, delimiter=PathDelimiter):
    # type: (str, str) -> Iterable[str]
    """Yield the components of *path* split on *delimiter*.

    A doubled delimiter escapes a literal delimiter character: it does not
    split, and is collapsed to a single character in the yielded component.
    Empty components are skipped.
    """
    p = path.strip()
    pos = 0
    while pos < len(p):
        idx = p.find(delimiter, pos)
        if idx >= 0:
            if idx+1 < len(p):
                if p[idx+1] == delimiter:
                    # Escaped (doubled) delimiter: skip past it and keep scanning.
                    pos = idx + 2
                    continue
            # Unescaped delimiter: emit everything before it, restart scan on the rest.
            comp = p[:idx].strip()
            p = p[idx+1:].strip()
            pos = 0
            if len(comp) > 0:
                yield comp.replace(2*delimiter, delimiter)
        else:
            # No further delimiter: the remainder is the final component.
            p = strip_path_delimiter(p, delimiter=delimiter)
            if len(p) > 0:
                yield p.replace(2*delimiter, delimiter)
            p = ''
class Permission:
    """Per-grantee permission entry (see SharedFolder.permissions)."""
    def __init__(self):
        self.uid = None             # grantee identifier
        self.name = None            # grantee display name
        self.manage_users = None    # tri-state flag: True/False/unspecified
        self.manage_records = None  # tri-state flag: True/False/unspecified
class SharedFolder:
    """Shared-folder description with its default flags and per-grantee permissions."""
    def __init__(self):
        self.uid = None
        self.path = None             # folder path (delimited, see path_components)
        self.manage_users = None     # default flags; tri-state True/False/unspecified
        self.manage_records = None
        self.can_edit = None
        self.can_share = None
        self.permissions = None  # type: [Permission]
class Attachment:
    """File attachment metadata; concrete importers populate the fields and
    implement :meth:`open`."""

    def __init__(self):
        # All metadata is filled in by concrete importer implementations.
        self.file_id = None
        self.name = None
        self.size = None
        self.key = None
        self.mime = None

    def open(self):
        """Return a readable stream for the attachment's content.

        Must be overridden by subclasses.
        """
        # Fixed: `raise NotImplemented` raised a TypeError in Python 3 (the
        # NotImplemented singleton is not an exception class).
        raise NotImplementedError()
class Folder:
    """Folder placement of a record, optionally inside a shared-folder domain."""
    def __init__(self):
        self.uid = None
        self.domain = None  # type: str
        self.path = None    # type: str
        self.can_edit = None
        self.can_share = None
class Record:
    """A vault record as produced/consumed by importers and exporters."""
    def __init__(self):
        self.uid = None
        self.title = None
        self.login = None
        self.password = None
        self.login_url = None
        self.notes = None
        self.custom_fields = {}   # arbitrary name -> value pairs
        self.folders = None  # type: [Folder]
        self.attachments = None  # type: [Attachment]
class BaseImporter:
    """Abstract importer: subclasses yield Record/SharedFolder objects."""

    def execute(self, name):
        # type: (BaseImporter, str) -> Iterable[Union[Record, SharedFolder]]
        """Generator entry point; delegates to :meth:`do_import`."""
        yield from self.do_import(name)

    def do_import(self, filename):
        # type: (BaseImporter, str) -> Iterable[Union[Record, SharedFolder]]
        # Fixed: `raise NotImplemented()` tried to call the NotImplemented
        # singleton, producing a TypeError; NotImplementedError is the
        # intended abstract-method marker.
        raise NotImplementedError()

    def extension(self):
        """Default file extension for this importer ('' = none)."""
        return ''
class BaseFileImporter(BaseImporter):
    """Importer reading from a file path, with ~ expansion and an optional
    default-extension fallback."""

    def execute(self, name):
        # type: (BaseFileImporter, str) -> Iterable[Union[Record, SharedFolder]]
        candidate = os.path.expanduser(name)
        if not os.path.isfile(candidate):
            # Retry with the importer's default extension appended.
            ext = self.extension()
            if ext:
                candidate = candidate + '.' + ext
            if not os.path.isfile(candidate):
                raise Exception('File \'{0}\' does not exist'.format(name))
        yield from self.do_import(candidate)
class BaseExporter:
    """Abstract exporter: subclasses write records to a file or stdout."""

    def execute(self, filename, records):
        # type: (BaseExporter, str, [Union[Record, SharedFolder]]) -> None
        """Normalize *filename* (expand ~, add default extension when the
        name has no dot) and delegate to :meth:`do_export`.

        An empty filename means stdout; exporters that do not support
        stdout log an error and return without exporting.
        """
        if filename:
            filename = os.path.expanduser(filename)
            if filename.find('.') < 0:
                ext = self.extension()
                if ext:
                    filename = filename + '.' + ext
        elif not self.supports_stdout():
            logging.error("stdout is not supported for this file format")
            return

        self.do_export(filename, records)

    def do_export(self, filename, records):
        # type: (BaseExporter, str, [Union[Record, SharedFolder]]) -> None
        # Fixed: `raise NotImplemented()` tried to call the NotImplemented
        # singleton (a TypeError); NotImplementedError is the intended
        # abstract-method marker.
        raise NotImplementedError()

    def has_shared_folders(self):
        """Whether this exporter can emit shared-folder entries."""
        return False

    def has_attachments(self):
        """Whether this exporter can emit attachments."""
        return False

    def extension(self):
        """Default file extension ('' = none)."""
        return ''

    def supports_stdout(self):
        """Whether an empty filename (stdout) is acceptable."""
        return False
|
from nwb_conversion_tools.basedatainterface import BaseDataInterface
from spikeextractors import SpikeGLXRecordingExtractor
from pynwb import NWBFile
from pathlib import Path
import pyopenephys
from .utils_expo import process_blocksV2, process_passesV2
class ExpoDataInterface(BaseDataInterface):
    """Conversion class for Expo behavioral data (trials from an Expo xml export)."""

    @classmethod
    def get_source_schema(cls):
        """Return a partial JSON schema indicating the input arguments and their types."""
        source_schema = super().get_source_schema()
        source_schema.update(
            required=[],
            properties=dict(
                expo_file=dict(
                    type="string",
                    format="file",
                    description="path to xml file containing expo data"
                ),
                ttl_file=dict(
                    type="string",
                    format="file",
                    description="path to file containing TTL data for synchronization (openephys or spikeglx)"
                )
            )
        )
        return source_schema

    @classmethod
    def get_conversion_options_schema(cls):
        """Compile conversion option schemas from each of the data interface classes."""
        conversion_options_schema = super().get_conversion_options_schema()
        conversion_options_schema.update(
            required=[],
            properties=dict(
                convert_expo=dict(
                    type="boolean",
                    description="Whether to convert Expo data to NWB or not"
                )
            )
        )
        return conversion_options_schema

    def get_conversion_options(self):
        # Default: include Expo data in the conversion.
        conversion_options = dict(convert_expo=True)
        return conversion_options

    def run_conversion(self, nwbfile: NWBFile, metadata: dict, convert_expo: bool = False):
        """
        Run conversion for this data interface.

        Parameters
        ----------
        nwbfile : NWBFile
        metadata : dict
        convert_expo : bool
            When False, Expo data is skipped entirely.
        """
        if not convert_expo:
            print('Expo data not included in conversion')
            return

        print(f'Adding expo data...')

        # Get sync offset time to add to Expo trials timestamps
        ttl_file = self.source_data['ttl_file']
        t_offset = 0
        if Path(ttl_file).is_file():
            # File type decides the acquisition system the TTLs came from.
            if ttl_file.endswith('.continuous'):  # OpenEphys
                t_offset = get_first_sync_time_openephys(filepath=ttl_file)
            elif '.imec.' in ttl_file:  # SpikeGLX
                # raise NotImplementedError("SpikeGLX offset not implemented")
                t_offset = get_first_sync_time_spikeglx(filepath=ttl_file)
            print(f'Trials sync offset: {t_offset}')

        expo_file = self.source_data['expo_file']
        if expo_file.endswith('.xml'):
            trials = process_passesV2(expo_file)
            blocks = process_blocksV2(expo_file)
        else:
            raise OSError(f"Expo file should be of xml type. File used: {expo_file}")

        # Add trials intervals values only if no trials data exists in nwbfile
        if nwbfile.trials is not None:
            print('Trials already exist in current nwb file. Expo trials intervals not added.')
        else:
            # Get parameters names to form trials columns
            # (union of all parameter names across blocks, first-seen order).
            col_names = list()
            for params in blocks.values():
                for p in params.keys():
                    if p not in col_names:
                        col_names.append(p)

            # Add trials columns
            for c in col_names:
                nwbfile.add_trial_column(
                    name=c,
                    description='no description'
                )

            # Add trials rows; missing parameters default to ''.
            for tr in trials.values():
                trial_dict = dict(
                    start_time=tr['StartTime'] + t_offset,
                    stop_time=tr['EndTime'] + t_offset
                )
                for c in col_names:
                    trial_dict[c] = blocks[str(tr['BlockID'])].get(c, '')
                nwbfile.add_trial(**trial_dict)
def get_first_sync_time_openephys(filepath):
    """
    Get first TTL pulse time from 100_ADC1.continuous file
    to synchronize Expo trials with Openephys recording.

    Returns the pulse time in seconds, or None when no sample exceeds
    the 10000 threshold.
    """
    d = pyopenephys.openephys_tools.loadContinuous(filepath=filepath)
    for index, is_high in enumerate(d['data'] > 10000):
        if is_high:
            # First threshold crossing, converted from samples to seconds.
            return index / float(d['header']['sampleRate'])
    return None
def get_first_sync_time_spikeglx(filepath):
    """
    Get first TTL pulse time from spikeglx file
    to synchronize Expo trials with SpikeGLX recording.
    """
    recording = SpikeGLXRecordingExtractor(file_path=filepath)
    ttl_frames, _ttl_states = recording.get_ttl_events()
    sampling_rate = recording.get_sampling_frequency()
    # First TTL event, converted from frames to seconds.
    return ttl_frames[0] / sampling_rate
|
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
# Load each installed app's admin module so its models register with the admin.
admin.autodiscover()

# NOTE(review): string view references ('robinson_app.views.json_markers')
# were deprecated in Django 1.8 and removed in 1.10 — fine for the Django
# version this project pins, but these must become imported callables on
# upgrade.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^markers.json$', 'robinson_app.views.json_markers'),
    url(r'^markers/(?P<photo_pk>\d*).json$', 'robinson_app.views.json_markers_details'),
    url(r'^photo/(?P<photo_pk>\d*)/$', 'robinson_app.views.photo'),
    url(r'^$', 'robinson_app.views.map'),
]

# Serve static content from Django only when in DEBUG
if settings.DEBUG:
    # Strip the leading slash so MEDIA_URL can be embedded in the regex below.
    media_url = settings.MEDIA_URL
    if len(media_url) > 1 and media_url.startswith('/'):
        media_url = media_url[1:]
    urlpatterns.extend([
        url(r'', include('gmapi.urls.media')),
        url(r'^%s(?P<path>.*)$' % media_url, 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT }),
    ])
|
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a ROI-FCN network on a region of interest database."""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
    """
    Parse input arguments.

    Prints usage and exits with status 1 when invoked without arguments.
    """
    ap = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    ap.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                    help='GPU device id to use [0]')
    ap.add_argument('--solver', dest='solver', type=str, default=None,
                    help='solver prototxt')
    ap.add_argument('--iters', dest='max_iters', type=int, default=40000,
                    help='number of iterations to train')
    ap.add_argument('--weights', dest='pretrained_model', type=str, default=None,
                    help='initialize with pretrained model weights')
    ap.add_argument('--cfg', dest='cfg_file', type=str, default=None,
                    help='optional config file')
    ap.add_argument('--imdb', dest='imdb_name', type=str, default='voc_2007_trainval',
                    help='dataset to train on')
    ap.add_argument('--rand', dest='randomize', action='store_true',
                    help='randomize (do not use a fixed seed)')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set config keys')

    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(1)

    return ap.parse_args()
def combined_roidb(imdb_names):
    """Build a training roidb from one or more imdb names.

    *imdb_names* may join several dataset names with '+'; their roidbs are
    concatenated and a synthetic imdb wraps the union. Returns (imdb, roidb).
    (Python 2 source — print statements.)
    """
    def get_roidb(imdb_name):
        # Load one dataset and prepare its training ROIs.
        imdb = get_imdb(imdb_name)
        print 'Loaded dataset `{:s}` for training'.format(imdb.name)
        imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
        roidb = get_training_roidb(imdb)
        return roidb

    roidbs = [get_roidb(s) for s in imdb_names.split('+')]
    roidb = roidbs[0]
    if len(roidbs) > 1:
        # Merge multiple datasets into a single roidb and a wrapper imdb.
        for r in roidbs[1:]:
            roidb.extend(r)
        imdb = datasets.imdb.imdb(imdb_names)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb
if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    # Layer config overrides: file first, then individual --set keys.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    imdb, roidb = combined_roidb(args.imdb_name)
    print '{:d} roidb entries'.format(len(roidb))

    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
|
"""Looping Examples"""
dogs = ['husky', 'beagle', 'doberman', 'dachsund', 'collie', 'mutt']
# This is an abomination! Who would do something like this?
i = 0; max = len(dogs)
while i < max:
print(dogs[i])
i += 1
print("")
# This is better, for some value of better
# compared to above, but NOT Pythonic!
for i in range(len(dogs)):
print(dogs[i])
print("")
# pythonic
for dog in dogs:
print(dog)
|
#test module for tfrecords
from deepforest import tfrecords
from deepforest import utilities
from deepforest import preprocess
from deepforest import get_data
import pytest
import os
import glob
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from keras_retinanet.preprocessing import csv_generator
from keras_retinanet import models
@pytest.fixture()
def config():
    """Build the shared test config and regenerate csv annotation fixtures."""
    print("Configuring tfrecord tests")
    config = {}
    config["patch_size"] = 200
    config["patch_overlap"] = 0.05
    config["annotations_xml"] = get_data("OSBS_029.xml")
    config["rgb_dir"] = "data"
    config["annotations_file"] = "tests/data/OSBS_029.csv"
    config["path_to_raster"] = get_data("OSBS_029.tif")
    config["image-min-side"] = 800
    config["backbone"] = "resnet50"

    #Create a clean config test data
    annotations = utilities.xml_to_annotations(xml_path=config["annotations_xml"])
    annotations.to_csv("tests/data/tfrecords_OSBS_029.csv", index=False)

    annotations_file = preprocess.split_raster(path_to_raster=config["path_to_raster"],
                                               annotations_file="tests/data/tfrecords_OSBS_029.csv",
                                               base_dir="tests/data/",
                                               patch_size=config["patch_size"],
                                               patch_overlap=config["patch_overlap"])

    annotations_file.to_csv("tests/data/testfile_tfrecords.csv", index=False, header=False)
    # create_classes is called for its side effect (presumably writes the
    # tests/data/classes.csv consumed below) — the return value is unused.
    class_file = utilities.create_classes("tests/data/testfile_tfrecords.csv")

    return config
#Reading
#Writing
@pytest.fixture()
def test_create_tfrecords(config):
    """This test is in flux due to the fact that tensorflow and cv2 resize methods are not identical: https://jricheimer.github.io/tensorflow/2019/02/11/resize-confusion/ """
    # NOTE(review): despite the test_ name, the fixture decorator means pytest
    # does not collect this as a test; downstream fixtures consume its return
    # value. Renaming would break those fixtures — verify before cleaning up.
    created_records = tfrecords.create_tfrecords(annotations_file="tests/data/testfile_tfrecords.csv",
                                                 class_file="tests/data/classes.csv",
                                                 image_min_side=config["image-min-side"],
                                                 backbone_model=config["backbone"],
                                                 size=100,
                                                 savedir="tests/data/")
    assert os.path.exists("tests/data/testfile_tfrecords_0.tfrecord")
    return created_records
@pytest.fixture()
def setup_create_tensors(test_create_tfrecords):
    # Fixture wrapper: hand the created input/target tensors to tests.
    created_tensors = tfrecords.create_tensors(test_create_tfrecords)
    return created_tensors
def test_create_tensors(test_create_tfrecords):
    """Input tensors can be created from the generated tfrecords."""
    print("Testing that input tensors can be created")
    created_tensors = tfrecords.create_tensors(test_create_tfrecords)
    # Expect a (inputs, targets) pair.
    assert len(created_tensors) == 2
    # NOTE(review): pytest warns about tests returning non-None — presumably a
    # leftover; verify nothing calls this as a plain helper before removing.
    return created_tensors
def test_create_dataset(test_create_tfrecords):
    # Smoke test: dataset construction must not raise.
    dataset = tfrecords.create_dataset(test_create_tfrecords)
def test_equivalence(config, setup_create_tensors):
    """Shapes from the tfrecord pipeline match the keras-retinanet CSV generator.

    Value-level equality is intentionally not asserted (see the comments
    below): tf and cv2 resize interpolation differ slightly.
    """
    #unpack created tensors
    tf_inputs, tf_targets = setup_create_tensors

    #the image going in to tensorflow should be equivalent to the image from the fit_generator
    backbone = models.backbone(config["backbone"])

    #CSV generator
    generator = csv_generator.CSVGenerator(
        csv_data_file="tests/data/testfile_tfrecords.csv",
        csv_class_file="tests/data/classes.csv",
        image_min_side=config["image-min-side"],
        preprocess_image=backbone.preprocess_image,
    )

    #find file in randomize generator group
    first_file = generator.groups[0][0]
    gen_filename = os.path.join(generator.base_dir, generator.image_names[first_file])
    original_image = generator.load_image(first_file)
    inputs, targets = generator.__getitem__(0)
    image = inputs[0,...]
    targets = targets[0][0,...]

    with tf.Session() as sess:
        #seek the randomized image to match
        tf_inputs, tf_targets = sess.run([tf_inputs, tf_targets])

        #assert filename is the same as generator
        #assert gen_filename == filename
        #tf_image = tf_image[0,...]
        tf_inputs = tf_inputs[0,...]
        tf_targets = tf_targets[0][0,...]

        #Same shape
        #assert tf_image.shape == image.shape
        assert tf_inputs.shape == image.shape
        assert tf_targets.shape == targets.shape

        #Same values, slightly duplicitious with above, but useful for debugging.
        #Saved array is the same as generator image
        #np.testing.assert_array_equal(image, tf_image)

        #Loaded array is the same as generator, this is not true currently, the opencv and the tensorflow interpolation method is slightly different, waiting for tf. 2.0
        #np.testing.assert_array_equal(tf_loaded, tf_image)

    ##Useful for debug to plot
    #fig = plt.figure()
    #ax1 = fig.add_subplot(1,4,1)
    #ax1.title.set_text('Fit Gen Original')
    #plt.imshow(original_image[...,::-1])
    #ax1 = fig.add_subplot(1,4,2)
    #ax1.title.set_text('Fit Generator')
    #plt.imshow(image)
    #ax2 = fig.add_subplot(1,4,3)
    #ax2 = fig.add_subplot(1,4,4)
    #ax2.title.set_text('Loaded Image')
    #plt.imshow(tf_inputs)
    #plt.show()
#Check for bad file types
#@pytest.fixture()
#def bad_annotations():
#annotations = utilities.xml_to_annotations(get_data("OSBS_029.xml"))
#f = "tests/data/testfile_error_deepforest.csv"
#annotations.to_csv(f,index=False,header=False)
#return f
#def test_tfdataset_error(bad_annotations):
#with pytest.raises(ValueError):
#records_created = tfrecords.create_tfrecords(annotations_file=bad_annotations, class_file=get_data("classes.csv"), image_min_side=800, backbone_model="resnet50", size=100, savedir="tests/data/")
|
# Find the highest number not in the list
import numpy as np  # retained: original built a numpy array; kept for module compatibility


def highest_missing(nums):
    """Return the largest integer strictly between min(nums) and max(nums)
    that is absent from *nums*.

    Scans downward from max(nums) - 1 to max(0, min(nums)) exclusive and
    returns the first missing value; falls back to 1 when every candidate
    is present (matching the original script's default).
    """
    for candidate in range(max(nums) - 1, max(0, min(nums)), -1):
        if candidate not in nums:
            return candidate
    return 1


# The original performed the identical scan twice with duplicated inline
# loops (once via numpy, once on a plain list); both are now served by the
# single helper above, preserving the printed output.
l = [1, 3, 5, 9, 11]
ans = highest_missing(l)
print(ans)

A = [1, 3, 5, 9, 11]
Ans = highest_missing(A)
print(Ans)
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``treemap.marker.colorbar.bordercolor`` property."""

    def __init__(
        self, plotly_name="bordercolor", parent_name="treemap.marker.colorbar", **kwargs
    ):
        # Default the edit type unless the caller supplied one explicitly.
        kwargs.setdefault("edit_type", "colorbars")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
from blackduck import Client
from blackduck.Client import HubSession
from blackduck.Authentication import CookieAuth
import argparse
import logging
from pprint import pprint
# Verbose logging so the full authentication round-trip is visible.
logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] {%(module)s:%(lineno)d} %(levelname)s - %(message)s"
)

parser = argparse.ArgumentParser("Authenticate using username/password credentials")
parser.add_argument("--base-url", required=True, help="Hub server URL e.g. https://your.blackduck.url")
parser.add_argument("--username", required=True, help="Hub user")
# BUG FIX: the --password help text was a copy/paste of "Hub user".
parser.add_argument("--password", required=True, help="Hub password")
parser.add_argument("--no-verify", dest='verify', action='store_false', help="disable TLS certificate verification")
args = parser.parse_args()

base_url = args.base_url
# Cookie-based auth flow: the session obtains a cookie with the supplied
# credentials, and the client reuses that session for all API calls.
session = HubSession(base_url, timeout=15.0, retries=3, verify=args.verify)
auth = CookieAuth(session, args.username, args.password)
bd = Client(base_url=base_url, session=session, auth=auth)

# Print the top-level REST resources as a connectivity/auth sanity check.
pprint(bd.list_resources())
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script.script import Script
from ams import ams
from ams_service import ams_service
from status import check_service_status
from ambari_commons.repo_manager.repo_manager_helper import check_installed_metrics_hadoop_sink_version
class AmsMonitor(Script):
  """Ambari lifecycle handler for the AMS (Ambari Metrics Service) monitor daemon.

  Each method is invoked by the Ambari agent with an ``env`` object carrying
  the command context; configuration values come from the dynamically
  imported ``params``/``status_params`` modules.
  """
  def install(self, env):
    """Install packages and apply initial configuration."""
    import params
    env.set_params(params)
    self.install_packages(env)
    self.configure(env) # for security
  def configure(self, env):
    """Write out monitor configuration via the shared ams() helper."""
    import params
    env.set_params(params)
    ams(name='monitor')
  def start(self, env, upgrade_type=None):
    """Re-configure, then start the monitor service."""
    self.configure(env) # for security
    ams_service( 'monitor',
                 action = 'start'
    )
  def stop(self, env, upgrade_type=None):
    """Stop the monitor service."""
    import params
    env.set_params(params)
    ams_service( 'monitor',
                 action = 'stop'
    )
  def status(self, env):
    """Report whether the monitor is running (raises on failure by convention)."""
    import status_params
    env.set_params(status_params)
    check_service_status(env, name='monitor')
  def get_log_folder(self):
    # Log directory, surfaced to Ambari for log aggregation.
    import params
    return params.ams_monitor_log_dir
  def get_pid_files(self):
    # PID file(s) Ambari uses for liveness checks.
    import status_params
    return [status_params.monitor_pid_file]
  def get_user(self):
    # System user the service runs as.
    import params
    return params.ams_user
  def check_hadoop_sink_version(self, env):
    """Verify the installed metrics hadoop-sink is at least the minimum version."""
    import params
    check_installed_metrics_hadoop_sink_version(checked_version=params.min_hadoop_sink_version,
                                                less_valid=False,
                                                equal_valid=True)
if __name__ == "__main__":
  # Entry point: the Ambari agent invokes this script with a command
  # (install/configure/start/stop/status), dispatched by Script.execute().
  AmsMonitor().execute()
|
"""
Django settings for django_project project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The hard-coded value is only a local-development fallback; deployments are
# expected to supply SECRET_KEY through the environment.
SECRET_KEY = os.environ.get('SECRET_KEY', 'django-insecure-i74b^d8tyb8c^@nzu)^32$1fx7t8rb1)khoj*4_h8aaz0vv_&o')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is kept as an int (0/1). Django only checks truthiness,
# but env values like 'True' would raise ValueError here -- confirm the deploy
# environment always sets 0 or 1.
DEBUG = int(os.environ.get('DEBUG', 1))
# Space-separated host list, e.g. DJANGO_ALLOWED_HOSTS="example.com www.example.com"
if os.environ.get('DJANGO_ALLOWED_HOSTS'):
    ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS').split(' ')
else:
    ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'crispy_forms',
    'markdownx',
    # django-allauth stack (requires the sites framework below).
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    # Project apps.
    'blog',
    'single_pages',
    'django_extensions',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
# Defaults to a local sqlite file; override via SQL_* env vars (e.g. Postgres
# in a containerized deployment -- note the 5432 default port).
DATABASES = {
    'default': {
        'ENGINE': os.environ.get("SQL_ENGINE", 'django.db.backends.sqlite3'),
        'NAME': os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, 'db.sqlite3')),
        'USER': os.environ.get("SQL_USER", 'user'),
        'PASSWORD': os.environ.get("SQL_PASSWORD", 'password'),
        'HOST': os.environ.get("SQL_HOST", 'localhost'),
        'PORT': os.environ.get("SQL_PORT", '5432'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATIC_ROOT = os.path.join(BASE_DIR, '_static')
MEDIA_URL = '/media/'  # URL prefix under which uploaded media files are served
MEDIA_ROOT = os.path.join(BASE_DIR, '_media')  # filesystem directory holding uploaded media files
CRISPY_TEMPLATE_PACK = 'bootstrap4'  # render crispy forms with the Bootstrap 4 template pack
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Keep the default model backend alongside allauth's backend.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
LOGIN_REDIRECT_URL = '/blog/'
|
# -*- coding: utf-8 -*-
import zipfile
import json
from src.loader.xmltodict import parse
from src.loader.graph import Graph
class Converter(object):
    """Convert a .docx document body into (parent, child) graph edges.

    A .docx file is a zip archive; the document body lives in
    ``word/document.xml``, which is parsed to a dict and walked recursively.
    """
    def __init__(self, file_path) -> None:
        super().__init__()
        # BUG FIX: the zip archive was previously never closed (and the local
        # name shadowed the builtin `zip`); use context managers instead.
        with open(file_path, 'rb') as f:
            with zipfile.ZipFile(f) as archive:
                xml_content = archive.read('word/document.xml')
        self.e = parse(xml_content)
        # Round-trip through JSON to normalize the parsed structure into
        # plain dicts/lists.
        self.e = json.loads(json.dumps(self.e))
        self.graphs = []

    def create_edges(self, data_json):
        """Recursively append (parent, child) edges from *data_json* to self.graphs.

        Namespace declarations (``@xmlns:*``), ``@mc:Ignorable`` entries and
        literal text nodes (``w:t``) are excluded.
        """
        for key, value in data_json.items():
            if isinstance(value, dict):
                for child_key in value.keys():
                    if "@xmlns:" not in key and "@mc:Ignorable" not in key\
                            and "@xmlns:" not in child_key\
                            and "@mc:Ignorable" not in child_key:
                        self.graphs.append((key, child_key))
                self.create_edges(value)
            elif isinstance(value, list):
                # Repeated elements: each list entry is a dict of children.
                for element in value:
                    for child_key in element.keys():
                        if "@xmlns:" not in key and "@mc:Ignorable" not in key\
                                and "@xmlns:" not in child_key\
                                and "@mc:Ignorable" not in child_key:
                            self.graphs.append((key, child_key))
                    self.create_edges(element)
            else:
                # Leaf value: keep it unless empty, a namespace artifact, or text.
                if value is not None and "@xmlns:" not in key and "@mc:Ignorable" not in key\
                        and "@xmlns:" not in value\
                        and "@mc:Ignorable" not in value:
                    # Remove text
                    if key != "w:t":
                        self.graphs.append((key, value))

    def dump_print(self):
        """Pretty-print the parsed document structure as JSON."""
        print(json.dumps(self.e, indent = 4))

    def convert(self):
        """Populate self.graphs from the parsed document root."""
        self.create_edges(self.e)

    def return_graph(self):
        """Wrap the accumulated edges in a Graph object."""
        return Graph(self.graphs)
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before delegating to the CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simsc.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine, so re-raise the original failure.
        raise
    # Hand off to Django's management utility (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
|
from datetime import datetime
import logging
import pandas as pd
from utils.fetcher_abstract import AbstractFetcher
__all__ = ('WorldECDCFetcher',)
logger = logging.getLogger(__name__)
class WorldECDCFetcher(AbstractFetcher):
    """Fetch worldwide daily COVID-19 cases/deaths from ECDC and upsert
    cumulative per-country figures into the epidemiology table."""
    LOAD_PLUGIN = True

    def fetch(self):
        """Download the ECDC world case-distribution CSV as a DataFrame."""
        url = 'https://opendata.ecdc.europa.eu/covid19/casedistribution/csv'
        logger.debug('Fetching world confirmed cases, deaths data from ECDC')
        return pd.read_csv(url)

    def run(self):
        """Accumulate daily counts into running per-country totals and upsert each row."""
        data = self.fetch()
        # BUG FIX: the original called data.sort_values(...) but discarded the
        # result (sort_values is not in-place), so the cumulative sums silently
        # relied on the incidental row order of the source CSV. Sort explicitly
        # here -- and by the *parsed* date, since dateRep is 'DD/MM/YYYY' and
        # does not sort chronologically as a string.
        data = data.assign(
            parsed_date=pd.to_datetime(data['dateRep'], format='%d/%m/%Y')
        ).sort_values(['countryterritoryCode', 'parsed_date'])
        last_country_code = None
        last_country_total_confirmed_cases = 0
        last_country_total_deaths = 0
        # Rows are now ordered old -> new within each country, so a running
        # sum per country yields cumulative figures.
        for index, record in data.iterrows():
            # Database expects 'YYYY-MM-DD'.
            date = record['parsed_date'].strftime('%Y-%m-%d')
            country = record['countriesAndTerritories']
            country_code = record['countryterritoryCode']
            confirmed = int(record['cases'])
            dead = int(record['deaths'])
            if last_country_code is None or last_country_code != country_code:
                # New country: reset the running totals.
                last_country_total_confirmed_cases = confirmed
                last_country_total_deaths = dead
                last_country_code = country_code
            else:
                last_country_total_confirmed_cases += confirmed
                last_country_total_deaths += dead
            upsert_obj = {
                'source': 'WRD_ECDC',
                'date': date,
                'country': country,
                'countrycode': country_code,
                'adm_area_1': None,
                'adm_area_2': None,
                'adm_area_3': None,
                'confirmed': last_country_total_confirmed_cases,
                'dead': last_country_total_deaths
            }
            self.db.upsert_epidemiology_data(**upsert_obj)
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from cms.models import CMSPlugin
from djangocms_bootstrap5.constants import COLOR_STYLE_CHOICES
from djangocms_bootstrap5.fields import AttributesField, TagTypeField
from .constants import LISTGROUP_STATE_CHOICES
class Bootstrap5ListGroup(CMSPlugin):
    """
    Components > "List Group" Plugin
    https://getbootstrap.com/docs/5.0/components/list-group/
    """
    # Render with the .list-group-flush variant (edge-to-edge list inside a card).
    list_group_flush = models.BooleanField(
        verbose_name=_('List group flush'),
        default=False,
        help_text=_('Create lists of content in a card with a flush list group.')
    )
    # Standard djangocms-bootstrap5 extension fields: HTML tag + extra attributes.
    tag_type = TagTypeField()
    attributes = AttributesField()
    def __str__(self):
        """Identify the plugin instance by its primary key."""
        return str(self.pk)
    def get_short_description(self):
        """Short CSS-class summary shown next to the plugin in the CMS UI."""
        text = ''
        if self.list_group_flush:
            text += '.list-group-flush'
        return text
class Bootstrap5ListGroupItem(CMSPlugin):
    """
    Components > "List Group Item" Plugin
    https://getbootstrap.com/docs/5.0/components/list-group/
    """
    # Contextual color variant (e.g. primary/success) applied as .list-group-item-<context>.
    list_context = models.CharField(
        verbose_name=_('Context'),
        choices=COLOR_STYLE_CHOICES,
        blank=True,
        max_length=255,
    )
    # Item state class (e.g. active/disabled) from LISTGROUP_STATE_CHOICES.
    list_state = models.CharField(
        verbose_name=_('State'),
        choices=LISTGROUP_STATE_CHOICES,
        blank=True,
        max_length=255,
    )
    # Standard djangocms-bootstrap5 extension fields: HTML tag + extra attributes.
    tag_type = TagTypeField()
    attributes = AttributesField()
    def __str__(self):
        """Identify the plugin instance by its primary key."""
        return str(self.pk)
    def get_short_description(self):
        """Short CSS-class summary shown next to the plugin in the CMS UI."""
        text = []
        if self.list_context:
            text.append('.list-group-item-{}'.format(self.list_context))
        if self.list_state:
            text.append('.{}'.format(self.list_state))
        return ' '.join(text)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
relative_dates.py: Getting a new relative date.
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
__author__ = "Breno RdV"
__copyright__ = "Breno RdV @ raccoon.ninja"
__contact__ = "http://raccoon.ninja"
__license__ = "MIT"
__version__ = "01.000"
__maintainer__ = "Breno RdV"
__status__ = "Demonstration"
# Reference point for the demonstration: 2018-01-31 10:11:12.
base_date = datetime(year=2018, month=1, day=31, hour=10, minute=11, second=12)

# relativedelta performs calendar-aware arithmetic: adding one month to
# January 31st clamps to the last valid day of February instead of overflowing.
new_date = base_date + relativedelta(months=1)

# Show both dates for comparison.
print(base_date)
print(new_date)
|
from .util_functions import sort_multiple_arrays_using_one, reverse_complement, output_transcript_sequences_to_fasta, \
download_pdb_file, get_uniprot_acc_from_transcript_id, read_sbs_from_vcf
from .gene_sequence_functions import get_genomic_ranges_for_gene, get_positions_from_ranges, \
get_gene_kmers_from_exon_ranges, get_all_possible_single_nucleotide_mutations
from .sifts_functions import get_sifts_alignment, get_sifts_alignment_for_chain, get_pdb_positions
|
# -*- coding: utf-8 -*-
"""
Comparison of computational time for generalized linear mixed effects models
Author: Fabio Sigrist, May 2021
"""
import pandas as pd
import numpy as np
import os
import time
import statsmodels.genmod.bayes_mixed_glm as glm
path_data = "C:\\GLMM_comparison\\"

# load data
# NOTE(review): squeeze=True was removed from pandas.read_csv in pandas 2.0;
# on modern pandas call .squeeze("columns") on the result instead. Kept as-is
# for compatibility with the pandas version this benchmark was written for.
group_data_P = pd.read_csv(os.path.join(path_data, 'group_data.csv'),squeeze=True)
X_P = pd.read_csv(os.path.join(path_data, 'X.csv'),squeeze=True)
y_P = pd.read_csv(os.path.join(path_data, 'y.csv'),squeeze=True)
likelihood_P = pd.read_csv(os.path.join(path_data, 'likelihood.csv'),squeeze=True)[0]

# A single grouping column is read back squeezed to a Series (ndim == 1).
if(len(group_data_P.shape)==1):
    num_randeff = 1
else:
    num_randeff = group_data_P.shape[1]
num_coef = X_P.shape[1]

# Fit model
if likelihood_P == 'bernoulli_probit':
    model = glm.BinomialBayesMixedGLM(endog=y_P, exog=X_P, exog_vc=group_data_P,
                                      ident=np.arange(0,num_randeff))
elif likelihood_P == 'poisson':
    model = glm.PoissonBayesMixedGLM(endog=y_P, exog=X_P, exog_vc=group_data_P,
                                     ident=np.arange(0,num_randeff))
else:
    # BUG FIX: an unsupported likelihood previously left `model` undefined and
    # crashed below with a confusing NameError; fail fast with a clear message.
    raise ValueError("Unsupported likelihood: %r" % likelihood_P)

# Time only the fitting step.
start = time.time()
model = model.fit_map()
end = time.time()
time_statsmodels = (end - start)

# Extract fitted values
summary = model.summary()
coefs = summary.tables[0]['Post. Mean'][0:num_coef]
# Entries after the fixed coefficients are reported on a log-sd scale;
# exponentiate and square to recover variance components.
vcs = np.exp(summary.tables[0]['Post. Mean'][num_coef:]) ** 2
coefs = coefs.astype(float)
vcs = vcs.astype(float)

# Calculate MSEs against the reference values (intercept 0, slopes 1, variances 1).
coefs_true = pd.Series([0]).append(pd.Series(np.ones(num_coef-1)))
vcs_true = pd.Series(np.ones(num_randeff))
mse_coefs_statsmodels = np.mean((coefs.values - coefs_true.values)**2)
mse_vcs_statsmodels = np.mean((vcs.values - vcs_true.values)**2)
|
from os.path import exists
from typing import List
from ...api.models import log as log_model
from . import extractor
from . import transformer
def get_is_file(logs_path: str) -> log_model.LogFile:
    """Build a LogFile record describing whether *logs_path* exists on disk."""
    file_exists = exists(logs_path)
    return log_model.LogFile(path=logs_path, is_file=file_exists)
def get_remote_addrs_count_from_file(logs_path: str) -> List[log_model.RemoteAddrCount]:
    """Extract log entries from *logs_path* and aggregate counts per remote address."""
    parsed_logs = extractor.extract(logs_path)
    return transformer.get_remote_addrs_count(parsed_logs)
|
# -*- coding: utf-8 -*-
# Copyright 2015-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2015
# - Cedric Serfon <cedric.serfon@cern.ch>, 2015
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018-2020
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Martin Barisits <martin.barisits@cern.ch>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
import unittest
import pytest
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
from rucio.client.ruleclient import RuleClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import InvalidObject
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import generate_uuid
from rucio.core.did import attach_dids, add_did, add_dids
from rucio.core.replica import list_datasets_per_rse, update_collection_replica, \
get_cleaned_updated_collection_replicas, delete_replicas, add_replicas
from rucio.core.rse import add_rse, del_rse, add_protocol, get_rse_id
from rucio.db.sqla import session, models, constants
from rucio.db.sqla.constants import ReplicaState
from rucio.tests.common import rse_name_generator
class TestDatasetReplicaClient(unittest.TestCase):
    """Client-level tests for listing dataset replicas via ReplicaClient."""
    def setUp(self):
        # Resolve VO kwargs once: in multi-VO mode every call must carry the
        # configured vo; in single-VO mode an empty dict is spread instead.
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
        else:
            self.vo = {}
    @pytest.mark.noparallel(reason='uses pre-defined RSE')
    def test_list_dataset_replicas(self):
        """ REPLICA (CLIENT): List dataset replicas."""
        replica_client = ReplicaClient()
        rule_client = RuleClient()
        did_client = DIDClient()
        scope = 'mock'
        dataset = 'dataset_' + str(generate_uuid())
        did_client.add_dataset(scope=scope, name=dataset)
        # One replication rule on MOCK -> exactly one dataset replica expected.
        rule_client.add_replication_rule(dids=[{'scope': scope, 'name': dataset}],
                                         account='root', copies=1, rse_expression='MOCK',
                                         grouping='DATASET')
        replicas = [r for r in replica_client.list_dataset_replicas(scope=scope, name=dataset)]
        assert len(replicas) == 1
    @pytest.mark.noparallel(reason='uses pre-defined RSE')
    def test_list_dataset_replicas_bulk(self):
        """ REPLICA (CLIENT): List dataset replicas bulk."""
        replica_client = ReplicaClient()
        rule_client = RuleClient()
        did_client = DIDClient()
        scope = 'mock'
        did1 = {'scope': scope, 'name': 'dataset_' + str(generate_uuid())}
        did_client.add_dataset(**did1)
        did2 = {'scope': scope, 'name': 'dataset_' + str(generate_uuid())}
        did_client.add_dataset(**did2)
        dids = [did1, did2]
        rule_client.add_replication_rule(dids=dids,
                                         account='root', copies=1, rse_expression='MOCK',
                                         grouping='DATASET')
        # A malformed DID (unexpected 'type' key) must be rejected up front.
        with pytest.raises(InvalidObject):
            replica_client.list_dataset_replicas_bulk(dids=[{'type': "I'm Different"}])
        replicas = list(replica_client.list_dataset_replicas_bulk(dids=dids))
        assert len(replicas) == 2
        for did in dids:
            # Each requested DID must be contained in some returned replica
            # (the replica dict carries at least the DID's keys and values).
            def replica_contains_did(rep):
                return all(map(lambda k: k in rep and did[k] == rep[k], did))
            assert any(map(replica_contains_did, replicas)), "%s must be in returned replicas" % (did, )
    @pytest.mark.noparallel(reason='uses pre-defined RSE')
    def test_list_datasets_per_rse(self):
        """ REPLICA (CLIENT): List datasets in RSE."""
        rule_client = RuleClient()
        did_client = DIDClient()
        scope = 'mock'
        dataset = 'dataset_' + str(generate_uuid())
        did_client.add_dataset(scope=scope, name=dataset)
        rule_client.add_replication_rule(dids=[{'scope': scope, 'name': dataset}],
                                         account='root', copies=1, rse_expression='MOCK',
                                         grouping='DATASET')
        # Core-level listing with a wildcard name filter must find the new dataset.
        replicas = [r for r in list_datasets_per_rse(rse_id=get_rse_id(rse='MOCK', **self.vo),
                                                     filters={'scope': InternalScope(scope, **self.vo), 'name': 'data*'})]
        assert replicas != []
    def test_list_dataset_replicas_archive(self):
        """ REPLICA (CLIENT): List dataset replicas with archives. """
        replica_client = ReplicaClient()
        did_client = DIDClient()
        rule_client = RuleClient()
        scope = 'mock'
        # Two throwaway RSEs, each with an xrootd protocol for both LAN and WAN.
        rse = 'APERTURE_%s' % rse_name_generator()
        rse_id = add_rse(rse, **self.vo)
        add_protocol(rse_id=rse_id, parameter={'scheme': 'root',
                                               'hostname': 'root.aperture.com',
                                               'port': 1409,
                                               'prefix': '//test/chamber/',
                                               'impl': 'rucio.rse.protocols.xrootd.Default',
                                               'domains': {
                                                   'lan': {'read': 1, 'write': 1, 'delete': 1},
                                                   'wan': {'read': 1, 'write': 1, 'delete': 1}}})
        rse2 = 'BLACKMESA_%s' % rse_name_generator()
        rse2_id = add_rse(rse2, **self.vo)
        add_protocol(rse_id=rse2_id, parameter={'scheme': 'root',
                                                'hostname': 'root.blackmesa.com',
                                                'port': 1409,
                                                'prefix': '//underground/facility',
                                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                                'domains': {
                                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                                    'wan': {'read': 1, 'write': 1, 'delete': 1}}})
        # register archive
        archive = {'scope': scope, 'name': 'another.%s.zip' % generate_uuid(),
                   'type': 'FILE', 'bytes': 2596, 'adler32': 'deedbeaf'}
        replica_client.add_replicas(rse=rse, files=[archive])
        replica_client.add_replicas(rse=rse2, files=[archive])
        # Constituent files: registered as plain replicas on rse2 and also
        # attached to the archive.
        archived_files = [{'scope': scope, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                           'bytes': 4322, 'adler32': 'deaddead'} for i in range(2)]
        replica_client.add_replicas(rse=rse2, files=archived_files)
        did_client.add_files_to_archive(scope=scope, name=archive['name'], files=archived_files)
        dataset_name = 'find_me.' + str(generate_uuid())
        did_client.add_dataset(scope=scope, name=dataset_name)
        did_client.attach_dids(scope=scope, name=dataset_name, dids=archived_files)
        rule_client.add_replication_rule(dids=[{'scope': scope, 'name': dataset_name}],
                                         account='root', copies=1, rse_expression=rse,
                                         grouping='DATASET')
        # Shallow listing: archive contents are not resolved, so the dataset
        # replica on `rse` is reported UNAVAILABLE.
        res = [r for r in replica_client.list_dataset_replicas(scope=scope,
                                                               name=dataset_name)]
        assert len(res) == 1
        assert res[0]['state'] == 'UNAVAILABLE'
        # deep=True: constituents inside the archive are resolved, yielding
        # three AVAILABLE replicas.
        res = [r for r in replica_client.list_dataset_replicas(scope=scope,
                                                               name=dataset_name,
                                                               deep=True)]
        assert len(res) == 3
        assert res[0]['state'] == 'AVAILABLE'
        assert res[1]['state'] == 'AVAILABLE'
        assert res[2]['state'] == 'AVAILABLE'
        del_rse(rse_id)
@pytest.mark.noparallel(reason='uses pre-defined RSEs, truncates table(s) updated_col_rep')
class TestDatasetReplicaUpdate(unittest.TestCase):
def setUp(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo = {}
self.scope = InternalScope('mock', **self.vo)
self.rse = 'MOCK4'
self.rse2 = 'MOCK3'
self.account = InternalAccount('root', **self.vo)
self.rse_id = get_rse_id(self.rse, **self.vo)
self.rse2_id = get_rse_id(self.rse2, **self.vo)
self.db_session = session.get_session()
def tearDown(self):
self.db_session.commit() # pylint: disable=no-member
def test_clean_and_get_collection_replica_updates(self):
""" REPLICA (CORE): Get cleaned update requests for collection replicas. """
dataset_name_with_collection_replica = 'dataset_with_rse%s' % generate_uuid()
dataset_name_without_collection_replica = 'dataset_without_rse%s' % generate_uuid()
add_dids(dids=[{'name': dataset_name_without_collection_replica, 'scope': self.scope, 'type': constants.DIDType.DATASET},
{'name': dataset_name_with_collection_replica, 'scope': self.scope, 'type': constants.DIDType.DATASET}], account=self.account, session=self.db_session)
self.db_session.query(models.UpdatedCollectionReplica).delete() # pylint: disable=no-member
self.db_session.commit() # pylint: disable=no-member
# setup test data - 4 without corresponding replica, 4 duplicates and 2 correct
models.CollectionReplica(rse_id=self.rse_id, scope=self.scope, bytes=10, length=0, available_replicas_cnt=0, state=constants.ReplicaState.AVAILABLE, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET)\
.save(session=self.db_session)
models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name_without_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name_without_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name_without_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name_without_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name_with_collection_replica, did_type=constants.DIDType.DATASET).save(session=self.db_session)
cleaned_collection_replica_updates = get_cleaned_updated_collection_replicas(total_workers=0, worker_number=0, session=self.db_session)
assert len(cleaned_collection_replica_updates) == 2
for update_request in cleaned_collection_replica_updates:
update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).one() # pylint: disable=no-member
assert update_request.scope == self.scope
assert update_request.name in (dataset_name_with_collection_replica, dataset_name_without_collection_replica)
    def test_update_collection_replica(self):
        """ REPLICA (CORE): Update collection replicas from update requests.

        Attaches two file replicas (2 bytes each) to one dataset, then drives
        update_collection_replica() through add/delete cycles and asserts that
        the CollectionReplica rows keep bytes, length, available_bytes,
        available_replicas_cnt and state in sync after every step.  The second
        half repeats the exercise with rse_id-less update requests against two
        RSEs holding a replica of the dataset each.
        """
        file_size = 2
        files = [{'name': 'file_%s' % generate_uuid(), 'scope': self.scope, 'bytes': file_size} for i in range(0, 2)]
        dataset_name = 'dataset_test_%s' % generate_uuid()
        add_replicas(rse_id=self.rse_id, files=files, account=self.account, session=self.db_session)
        add_did(scope=self.scope, name=dataset_name, type=constants.DIDType.DATASET, account=self.account, session=self.db_session)
        attach_dids(scope=self.scope, name=dataset_name, dids=files, account=self.account, session=self.db_session)
        # Collection replica starts with available_replicas_cnt=0 on purpose:
        # the update below must recompute the availability counters.
        models.CollectionReplica(rse_id=self.rse_id, scope=self.scope, state=constants.ReplicaState.AVAILABLE, name=dataset_name, did_type=constants.DIDType.DATASET, bytes=len(files) * file_size, length=len(files), available_replicas_cnt=0)\
            .save(session=self.db_session)
        # Update request with rse id
        # First update -> dataset replica should be available
        models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(rse_id=self.rse_id, scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        # A processed update request must be consumed (deleted) afterwards.
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(id=update_request.id).first()  # pylint: disable=no-member
        assert update_request is None
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == len(files) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files)
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        # Delete one file replica -> dataset replica should be unavailable
        delete_replicas(rse_id=self.rse_id, files=[files[0]], session=self.db_session)
        # delete_replicas itself queues the UpdatedCollectionReplica row here.
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(rse_id=self.rse_id, scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.UNAVAILABLE
        # Add one file replica -> dataset replica should be available again
        add_replicas(rse_id=self.rse_id, files=[files[0]], account=self.account, session=self.db_session)
        attach_dids(scope=self.scope, name=dataset_name, dids=[files[0]], account=self.account, session=self.db_session)
        models.UpdatedCollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(rse_id=self.rse_id, scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == len(files) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files)
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        # Delete all file replicas -> dataset replica should be deleted
        delete_replicas(rse_id=self.rse_id, files=files, session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(rse_id=self.rse_id, scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name).all()  # pylint: disable=no-member
        assert len(dataset_replica) == 0
        # Update request without rse_id - using two replicas per file -> total 4 replicas
        add_replicas(rse_id=self.rse_id, files=files, account=self.account, session=self.db_session)
        add_replicas(rse_id=self.rse2_id, files=files, account=self.account, session=self.db_session)
        attach_dids(scope=self.scope, name=dataset_name, dids=files, account=self.account, session=self.db_session)
        models.CollectionReplica(rse_id=self.rse_id, scope=self.scope, name=dataset_name, state=constants.ReplicaState.UNAVAILABLE, did_type=constants.DIDType.DATASET, bytes=len(files) * file_size, length=len(files)).save(session=self.db_session)
        models.CollectionReplica(rse_id=self.rse2_id, scope=self.scope, name=dataset_name, state=constants.ReplicaState.UNAVAILABLE, did_type=constants.DIDType.DATASET, bytes=len(files) * file_size, length=len(files)).save(session=self.db_session)
        # First update -> replicas should be available
        models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter_by(scope=self.scope, name=dataset_name).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        # An rse_id-less update request refreshes the collection replica on
        # every RSE, hence the loop over both rows.
        for dataset_replica in self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name).all():  # pylint: disable=no-member
            assert dataset_replica['bytes'] == len(files) * file_size
            assert dataset_replica['length'] == len(files)
            assert dataset_replica['available_bytes'] == len(files) * file_size
            assert dataset_replica['available_replicas_cnt'] == len(files)
            assert dataset_replica['state'] == ReplicaState.AVAILABLE
        # Delete first replica on first RSE -> replica on first RSE should be unavailable, replica on second RSE should be still available
        delete_replicas(rse_id=self.rse_id, files=[files[0]], session=self.db_session)
        models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        # delete_replica creates also update object but with rse_id -> extra filter for rse_id is NULL
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.scope == self.scope, models.UpdatedCollectionReplica.name == dataset_name,  # pylint: disable=no-member
                                                                                      models.UpdatedCollectionReplica.rse_id.is_(None)).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.UNAVAILABLE
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse2_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == len(files) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files)
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        # Set the state of the first replica on the second RSE to UNAVAILABLE -> both replicass should be unavailable
        file_replica = self.db_session.query(models.RSEFileAssociation).filter_by(rse_id=self.rse2_id, scope=self.scope, name=files[0]['name']).one()  # pylint: disable=no-member
        file_replica.state = constants.ReplicaState.UNAVAILABLE
        models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.scope == self.scope, models.UpdatedCollectionReplica.name == dataset_name,  # pylint: disable=no-member
                                                                                      models.UpdatedCollectionReplica.rse_id.is_(None)).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.UNAVAILABLE
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse2_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.UNAVAILABLE
        # Delete first replica on second RSE -> file is not longer part of dataset -> both replicas should be available
        delete_replicas(rse_id=self.rse2_id, files=[files[0]], session=self.db_session)
        models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.scope == self.scope, models.UpdatedCollectionReplica.name == dataset_name,  # pylint: disable=no-member
                                                                                      models.UpdatedCollectionReplica.rse_id.is_(None)).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        # The file now has no replica anywhere -> it drops out of the dataset,
        # so both collection replicas shrink to one file and become AVAILABLE.
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['length'] == len(files) - 1
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse2_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['length'] == len(files) - 1
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        # Add first replica to the first RSE -> first replicas should be available
        add_replicas(rse_id=self.rse_id, files=[files[0]], account=self.account, session=self.db_session)
        attach_dids(scope=self.scope, name=dataset_name, dids=[files[0]], account=self.account, session=self.db_session)
        models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.scope == self.scope, models.UpdatedCollectionReplica.name == dataset_name,  # pylint: disable=no-member
                                                                                      models.UpdatedCollectionReplica.rse_id.is_(None)).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == len(files) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files)
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse2_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == (len(files) - 1) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files) - 1
        assert dataset_replica['state'] == ReplicaState.UNAVAILABLE
        # Add first replica to the second RSE -> both replicas should be available again
        add_replicas(rse_id=self.rse2_id, files=[files[0]], account=self.account, session=self.db_session)
        models.UpdatedCollectionReplica(scope=self.scope, name=dataset_name, did_type=constants.DIDType.DATASET).save(session=self.db_session)
        update_request = self.db_session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.scope == self.scope, models.UpdatedCollectionReplica.name == dataset_name,  # pylint: disable=no-member
                                                                                      models.UpdatedCollectionReplica.rse_id.is_(None)).one()  # pylint: disable=no-member
        update_collection_replica(update_request=update_request.to_dict(), session=self.db_session)
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == len(files) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files)
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
        dataset_replica = self.db_session.query(models.CollectionReplica).filter_by(scope=self.scope, name=dataset_name, rse_id=self.rse2_id).one()  # pylint: disable=no-member
        assert dataset_replica['bytes'] == len(files) * file_size
        assert dataset_replica['length'] == len(files)
        assert dataset_replica['available_bytes'] == len(files) * file_size
        assert dataset_replica['available_replicas_cnt'] == len(files)
        assert dataset_replica['state'] == ReplicaState.AVAILABLE
|
import os
import sys
import shutil
import subprocess
import time
import re
from datetime import date
from datetime import datetime
# Sweep configuration: MPI rank counts (largest first) plus the fixed code
# and result locations on disk.
num_procs = [6, 5, 4, 3, 2, 1]
path_dir = "/home/ashutosh/codes/dealii_code/examples/KLexpansion_results1/"
code_dir = "/home/ashutosh/codes/dealii_code/examples/KLexpansion"
# One result directory and one shell command per rank count, in the same order.
path_dir_list = [path_dir + '{0}'.format(ranks) for ranks in num_procs]
exect_command = ["mpirun -n {0} klexpansion > out.log".format(ranks) for ranks in num_procs]
def purge(dir, pattern):
    """Delete every file in *dir* whose name matches the regex *pattern*.

    Parameters
    ----------
    dir : str
        Directory to scan (non-recursive).
    pattern : str
        Regular expression matched against each file name with ``search``
        (i.e. a match anywhere in the name deletes the file).
    """
    # NOTE: the parameter name shadows the builtin ``dir``; kept as-is so the
    # call signature stays backward compatible.
    matcher = re.compile(pattern)  # compile once instead of once per file
    for fname in os.listdir(dir):
        if matcher.search(fname):
            os.remove(os.path.join(dir, fname))
# Run each mpirun command in turn from the code directory, archive the run's
# output into its result directory, then clean up for the next job.
for job_index, shell_command in enumerate(exect_command):
    os.chdir(code_dir)
    print("***********************************************************************")
    print("Launching new job. \n")
    print("Code is stored in the dir:{}".format(code_dir))
    print("Command executed on the shell is: {0} ".format(shell_command))
    print("Job has been launched on {0}.".format(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
    # shell=True: the command string contains output redirection ("> out.log").
    job = subprocess.Popen([shell_command], shell=True)
    job.wait()
    print("Job has been completed on {0} and output has been stored in out.log file.".format(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
    print("Copying the data from code directory:{0} to result directory:{1}".format(code_dir, path_dir_list[job_index]))
    shutil.copytree(code_dir, path_dir_list[job_index])
    print("Cleaning the code directory and preparing it for new job.")
    purge(code_dir, "Solution")
    print("Putting the scheduler on sleep for 10 seconds.")
    print("***********************************************************************")
    time.sleep(10)
|
import heapq as hq
import scipy as sp
import numpy as np
from openpnm.algorithms import GenericAlgorithm
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class InvasionPercolation(GenericAlgorithm):
    r"""
    A classic/basic invasion percolation algorithm optimized for speed.

    Parameters
    ----------
    network : OpenPNM Network object
        The Network upon which the invasion will occur.

    Notes
    -----
    This algorithm uses a binary heap to store a list of all accessible
    throats, sorted according to entry pressure.  This means that item [0] in
    the heap is the most easily invaded throat, so looking up which throat
    to invade next is computationally trivial.  In order to keep the list
    sorted new throats to the list takes more time, however, the heap data
    structure is very efficient at this.  Interested users can consult the
    wikipedia page on `binary heaps
    <https://en.wikipedia.org/wiki/Binary_heap>`_ for more information.

    Examples
    --------
    Start by importing the usual packages:

    >>> import openpnm as op
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt

    Create 2D cubic network for easier visualization:

    >>> S = np.array([100, 100, 1])
    >>> pn = op.network.Cubic(shape=S, spacing=0.0001, name='pn11')

    Add a basic geometry:

    >>> geom = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)

    Create an invading phase, and attach the capillary pressure model:

    >>> water = op.phases.Water(network=pn)
    >>> water.add_model(propname='throat.entry_pressure',
    ...                 model=op.models.physics.capillary_pressure.washburn)

    Initialize an invasion percolation object and define inlets:

    >>> ip = op.algorithms.InvasionPercolation(network=pn)
    >>> ip.setup(phase=water)
    >>> ip.set_inlets(pores=0)
    >>> ip.run()

    After running the algorithm the invading phase configuration at a given
    saturation can be obtained and assigned to the phase object:

    >>> water.update(ip.results(Snwp=0.5))

    Because it was a 2D network it's easy to quickly visualize the invasion
    pattern as an image for verification:

    .. note::
        Because the network is 2D and cubic, an image can be generated with
        color corresponding to a value.  The following plots the entire
        invasion sequence, and the water configuration at Snwp = 0.5.

        ``plt.subplot(1, 2, 1)``
        ``plt.imshow(np.reshape(ip['pore.invasion_sequence'], newshape=S[S > 1]))``
        ``plt.subplot(1, 2, 2)``
        ``plt.imshow(np.reshape(water['pore.occupancy'], newshape=S[S > 1]))``

    """

    def __init__(self, settings={}, phase=None, **kwargs):
        # NOTE: the mutable default ``settings={}`` is only read (never
        # mutated) below; it is kept for backward signature compatibility.
        def_set = {'phase': None,
                   'pore_volume': 'pore.volume',
                   'throat_volume': 'throat.volume',
                   'entry_pressure': 'throat.entry_pressure',
                   'gui': {'setup': {'phase': None,
                                     'entry_pressure': '',
                                     'pore_volume': '',
                                     'throat_volume': ''},
                           'set_inlets': {'pores': None,
                                          'overwrite': False},
                           'apply_trapping': {'outlets': None}
                           }
                   }
        super().__init__(**kwargs)
        # Defaults first, then user-supplied settings override them.
        self.settings.update(def_set)
        self.settings.update(settings)
        if phase is not None:
            self.setup(phase=phase)

    def setup(self, phase, entry_pressure='', pore_volume='', throat_volume=''):
        r"""
        Set up the required parameters for the algorithm

        Parameters
        ----------
        phase : OpenPNM Phase object
            The phase to be injected into the Network.  The Phase must have the
            capillary entry pressure values for the system.

        entry_pressure : string
            The dictionary key to the capillary entry pressure.  If none is
            supplied then the current value is retained. The default is
            'throat.capillary_pressure'.

        pore_volume : string
            The dictionary key to the pore volume.  If none is supplied then
            the current value is retained. The default is 'pore.volume'.

        throat_volume : string
            The dictionary key to the throat volume.  If none is supplied then
            the current value is retained. The default is 'throat.volume'.

        """
        self.settings['phase'] = phase.name
        if pore_volume:
            self.settings['pore_volume'] = pore_volume
        if throat_volume:
            self.settings['throat_volume'] = throat_volume
        if entry_pressure:
            self.settings['entry_pressure'] = entry_pressure

        # Setup arrays and info
        # NOTE: scipy's top-level numpy aliases (sp.argsort etc.) were
        # deprecated and removed; use numpy directly.
        self['throat.entry_pressure'] = phase[self.settings['entry_pressure']]
        # Indices into t_entry giving a sorted list
        self['throat.sorted'] = np.argsort(self['throat.entry_pressure'], axis=0)
        # Inverse permutation: rank of each throat in entry-pressure order.
        self['throat.order'] = 0
        self['throat.order'][self['throat.sorted']] = np.arange(0, self.Nt)
        # -1 marks "not yet invaded" for both pores and throats.
        self['throat.invasion_sequence'] = -1
        self['pore.invasion_sequence'] = -1
        self._tcount = 0

    def set_inlets(self, pores=[], overwrite=False):
        r"""
        Parameters
        ----------
        pores : array_like
            The list of inlet pores from which the Phase can enter the Network
        """
        if overwrite:
            self['pore.invasion_sequence'] = -1
        self['pore.invasion_sequence'][pores] = 0

        # Perform initial analysis on input pores: seed the heap with the
        # entry-pressure ranks of all throats touching the inlets.
        Ts = self.project.network.find_neighbor_throats(pores=pores)
        self.queue = []
        [hq.heappush(self.queue, T) for T in self['throat.order'][Ts]]

    def run(self, n_steps=None):
        r"""
        Perform the algorithm

        Parameters
        ----------
        n_steps : int
            The number of throats to invaded during this step

        """
        if n_steps is None:
            n_steps = np.inf
        queue = self.queue
        if len(queue) == 0:
            logger.warning('queue is empty, this network is fully invaded')
            return
        # Local aliases for the arrays updated in the hot loop below.
        t_sorted = self['throat.sorted']
        t_order = self['throat.order']
        t_inv = self['throat.invasion_sequence']
        p_inv = self['pore.invasion_sequence']

        count = 0
        while (len(queue) > 0) and (count < n_steps):
            # Find throat at the top of the queue
            t = hq.heappop(queue)
            # Extract actual throat number
            t_next = t_sorted[t]
            t_inv[t_next] = self._tcount
            # If throat is duplicated
            while len(queue) > 0 and queue[0] == t:
                # Note: Preventing duplicate entries below might save some time
                t = hq.heappop(queue)
            # Find pores connected to newly invaded throat
            Ps = self.project.network['throat.conns'][t_next]
            # Remove already invaded pores from Ps
            Ps = Ps[p_inv[Ps] < 0]
            if len(Ps) > 0:
                p_inv[Ps] = self._tcount
                Ts = self.project.network.find_neighbor_throats(pores=Ps)
                Ts = Ts[t_inv[Ts] < 0]  # Remove invaded throats from Ts
                [hq.heappush(queue, T) for T in t_order[Ts]]
            count += 1
            self._tcount += 1
        self['throat.invasion_sequence'] = t_inv
        self['pore.invasion_sequence'] = p_inv

    def results(self, Snwp=None):
        r"""
        Returns the phase configuration at the specified non-wetting phase
        (invading phase) saturation.

        Parameters
        ----------
        Snwp : scalar, between 0 and 1
            The network saturation for which the phase configuration is
            desired.

        Returns
        -------
        Two dictionary containing arrays that describe the pore and throat
        distribution at the given saturation.  Specifically, these are:

        **'pore.occupancy'** : 1 indicates the pores is invaded and 0
        otherwise.

        **'throat.occupancy'** : Same as described above but for throats.

        """
        if Snwp is None:
            Np = self['pore.invasion_sequence']
            Nt = self['throat.invasion_sequence']
            data = {'pore.invasion_sequence': Np,
                    'throat.invasion_sequence': Nt}
        else:
            net = self.project.network
            P12 = net['throat.conns']
            # Fetch void volume for pores and throats
            Vp = net[self.settings['pore_volume']]
            Vt = net[self.settings['throat_volume']]
            # Fetch the order of filling
            Np = self['pore.invasion_sequence']
            Nt = self['throat.invasion_sequence']
            # Create Nt-long mask of which pores were filled when throat was filled
            Pinv = (Np[P12].T == Nt).T
            # If a pore and throat filled together, find combined volume
            Vinv = np.vstack(((Pinv*Vp[P12]).T, Vt)).T
            Vinv = np.sum(Vinv, axis=1)
            # Convert to cumulative volume filled as each throat is invaded
            x = np.argsort(Nt)  # Find order throats were invaded
            Vinv_cum = np.cumsum(Vinv[x])
            # Normalized cumulative volume filled into saturation
            S = Vinv_cum/(Vp.sum() + Vt.sum())
            # Find throat invasion step where Snwp was reached
            try:
                N = np.where(S < Snwp)[0][-1]
            except IndexError:
                # Saturation never fell below Snwp -> nothing is occupied.
                N = -np.inf
            data = {'pore.occupancy': Np <= N, 'throat.occupancy': Nt <= N}
        return data

    def apply_trapping(self, outlets):
        """
        Apply trapping based on algorithm described by Y. Masson [1].
        It is applied as a post-process and runs the percolation algorithm in
        reverse assessing the occupancy of pore neighbors. Consider the
        following scenario when running standard IP without trapping,
        3 situations can happen after each invasion step:

        * The number of defending clusters stays the same and clusters can
          shrink
        * A cluster of size one is suppressed
        * A cluster is split into multiple clusters

        In reverse the following opposite situations can happen:

        * The number of defending clusters stays the same and clusters can
          grow
        * A cluster of size one is created
        * Multiple clusters merge into one cluster

        With trapping the reversed rules are adjusted so that only clusters
        that do not connect to a sink can grow and merge. At the point that a
        neighbor connected to a sink is touched the trapped cluster stops
        growing as this is the point of trapping in forward invasion time.

        Logger info displays the invasion sequence and pore index and a message
        with condition number based on the modified trapping rules and the
        assignment of the pore to a given cluster.

        Initially all invaded pores are given cluster label -1
        Outlets / Sinks are given -2

        New clusters that grow into fully trapped clusters are either
        identified at the point of breakthrough or grow from nothing if the
        full invasion sequence is run, they are assigned numbers from 0 up.

        Ref:
        [1] Masson, Y., 2016. A fast two-step algorithm for invasion
        percolation with trapping. Computers & Geosciences, 90, pp.41-48

        Parameters
        ----------
        outlets : list or array of pore indices for defending fluid to escape
        through

        Returns
        -------
        Creates a throat array called 'pore.clusters' in the Algorithm
        dictionary. Any positive number is a trapped cluster

        Also creates 2 boolean arrays Np and Nt long called '<element>.trapped'

        """
        # First see if network is fully invaded
        net = self.project.network
        invaded_ps = self['pore.invasion_sequence'] > -1
        if ~np.all(invaded_ps):
            # Put defending phase into clusters
            clusters = net.find_clusters2(~invaded_ps)
            # Identify clusters that are connected to an outlet and set to -2
            # -1 is the invaded fluid
            # -2 is the defender fluid able to escape
            # All others now trapped clusters which grow as invasion is reversed
            out_clusters = np.unique(clusters[outlets])
            for c in out_clusters:
                if c >= 0:
                    clusters[clusters == c] = -2
        else:
            # Go from end
            clusters = np.ones(net.Np, dtype=int)*-1
            clusters[outlets] = -2

        # Turn into a list for indexing: (invasion sequence, pore index) pairs.
        inv_seq = np.vstack((self['pore.invasion_sequence'].astype(int),
                             np.arange(0, net.Np, dtype=int))).T
        # Reverse sort list
        inv_seq = inv_seq[inv_seq[:, 0].argsort()][::-1]
        next_cluster_num = np.max(clusters)+1
        # For all the steps after the inlets are set up to break-through
        # Reverse the sequence and assess the neighbors cluster state
        stopped_clusters = np.zeros(net.Np, dtype=bool)
        all_neighbors = net.find_neighbor_pores(net.pores(), flatten=False,
                                                include_input=True)
        for un_seq, pore in inv_seq:
            if pore not in outlets and un_seq > 0:  # Skip inlets and outlets
                nc = clusters[all_neighbors[pore]]  # Neighboring clusters
                unique_ns = np.unique(nc[nc != -1])  # Unique Neighbors
                seq_pore = "S:"+str(un_seq)+" P:"+str(pore)
                if np.all(nc == -1):
                    # This is the start of a new trapped cluster
                    clusters[pore] = next_cluster_num
                    next_cluster_num += 1
                    msg = (seq_pore+" C:1 new cluster number: " +
                           str(clusters[pore]))
                    logger.info(msg)
                elif len(unique_ns) == 1:
                    # Grow the only connected neighboring cluster
                    if not stopped_clusters[unique_ns[0]]:
                        clusters[pore] = unique_ns[0]
                        msg = (seq_pore+" C:2 joins cluster number: " +
                               str(clusters[pore]))
                        logger.info(msg)
                    else:
                        clusters[pore] = -2
                elif -2 in unique_ns:
                    # We have reached a sink neighbor, stop growing cluster
                    msg = (seq_pore+" C:3 joins sink cluster")
                    logger.info(msg)
                    clusters[pore] = -2
                    # Stop growth and merging
                    stopped_clusters[unique_ns[unique_ns > -1]] = True
                else:
                    # We might be able to do some merging
                    # Check if any stopped clusters are neighbors
                    if np.any(stopped_clusters[unique_ns]):
                        msg = (seq_pore+" C:4 joins sink cluster")
                        logger.info(msg)
                        clusters[pore] = -2
                        # Stop growing all neighboring clusters
                        stopped_clusters[unique_ns] = True
                    else:
                        # Merge multiple un-stopped trapped clusters
                        new_num = unique_ns[0]
                        clusters[pore] = new_num
                        for c in unique_ns:
                            clusters[clusters == c] = new_num
                            msg = (seq_pore + " C:5 merge clusters: " +
                                   str(c) + " into "+str(new_num))
                            logger.info(msg)

        # And now return clusters
        self['pore.clusters'] = clusters
        logger.info("Number of trapped clusters" +
                    str(np.sum(np.unique(clusters) >= 0)))
        self['pore.trapped'] = self['pore.clusters'] > -1
        trapped_ts = net.find_neighbor_throats(self['pore.trapped'])
        self['throat.trapped'] = np.zeros([net.Nt], dtype=bool)
        self['throat.trapped'][trapped_ts] = True
        # Trapped elements are removed from the invasion sequence entirely.
        self['pore.invasion_sequence'][self['pore.trapped']] = -1
        self['throat.invasion_sequence'][self['throat.trapped']] = -1
|
import pytest
import numpy as np
from .._sky_bounds import get_rough_sky_bounds, radec_to_uv
from ...wcs import FastHashingWCS
@pytest.fixture
def bnds_data():
    """Fixture: a DES-style TPV WCS plus the rough sky bounds derived from it.

    Returns the tuple ``(wcs, position_offset, sky_bnds, ra_ccd, dec_ccd)``
    consumed by the tests below.
    """
    # Literal TPV distortion header (the comment in the edge-buffer test below
    # notes this WCS is from DES); keep the values byte-for-byte.
    wcs = FastHashingWCS(dict(
        naxis1=2048,
        naxis2=4096,
        equinox=2000.00000000,
        radesys='ICRS    ',
        ctype1='RA---Tpv',
        ctype2='DEC--Tpv',
        cunit1='deg     ',
        cunit2='deg     ',
        crval1=3.204939506590E+02,
        crval2=6.143643163701E-01,
        crpix1=1.116880000000E+04,
        crpix2=-4.341000000000E+03,
        cd1_1=-1.508740062706E-07,
        cd1_2=7.285654526391E-05,
        cd2_1=-7.285164861781E-05,
        cd2_2=-1.494473487673E-07,
        pv1_0=-3.490695800309E-03,
        pv1_1=1.014642131611E+00,
        pv1_2=6.838988292402E-03,
        pv1_4=-1.064604424650E-02,
        pv1_5=-1.671893935475E-02,
        pv1_6=-7.265386507767E-03,
        pv1_7=1.377291873667E-03,
        pv1_8=8.590284815313E-03,
        pv1_9=5.428141605600E-03,
        pv1_10=2.582395791405E-03,
        pv2_0=-4.565360201645E-03,
        pv2_1=1.015176020471E+00,
        pv2_2=1.033041271404E-02,
        pv2_4=-1.145876554837E-02,
        pv2_5=-2.100570193619E-02,
        pv2_6=-9.155231884000E-03,
        pv2_7=1.907737076892E-03,
        pv2_8=1.141155239105E-02,
        pv2_9=6.262968730573E-03,
        pv2_10=2.846317319947E-03))
    # Offset added to zero-based pixel indices before calling image2sky
    # (FITS-style one-based convention, presumably -- confirm against callers).
    position_offset = 1
    sky_bnds, ra_ccd, dec_ccd = get_rough_sky_bounds(
        im_shape=wcs.get_naxis()[::-1],  # reversed -> (nrow, ncol)
        wcs=wcs,
        position_offset=position_offset,
        bounds_buffer_uv=16.0,  # arcsec buffer (see edge-buffer test below)
        n_grid=4)
    return wcs, position_offset, sky_bnds, ra_ccd, dec_ccd
def test_get_rough_sky_bounds_smoke(bnds_data):
    """Every on-chip grid point -- dithered or not -- lies inside the bounds."""
    wcs, position_offset, sky_bnds, ra_ccd, dec_ccd = bnds_data
    ncol, nrow = wcs.get_naxis()
    grid_row, grid_col = np.mgrid[0:nrow+64:64, 0:ncol+64:64]
    grid_row = grid_row.ravel()
    grid_col = grid_col.ravel()

    # Undithered grid first.
    ra, dec = wcs.image2sky(
        x=grid_col + position_offset,
        y=grid_row + position_offset)
    u, v = radec_to_uv(ra, dec, ra_ccd, dec_ccd)
    assert np.all(sky_bnds.contains_points(u, v))

    # Then the four roughly half-pixel dithers (same order as before).
    dithers = [(-0.51, -0.51), (-0.51, 0.52), (0.51, -0.51), (0.51, 0.52)]
    for col_shift, row_shift in dithers:
        ra, dec = wcs.image2sky(
            x=grid_col + position_offset + col_shift,
            y=grid_row + position_offset + row_shift)
        u, v = radec_to_uv(ra, dec, ra_ccd, dec_ccd)
        assert np.all(sky_bnds.contains_points(u, v))
@pytest.mark.parametrize(
    'offset,outside',
    [(45, False),
     (75, True)])
def test_get_rough_sky_bounds_edge_buffer(bnds_data, offset, outside):
    """Points pushed off each CCD edge stay inside/outside the buffered bounds.

    The buffer in the fixture is 16 arcsec and the WCS is from DES, so the
    buffer spans roughly 16 / 0.263 ~ 61 pixels: dithering by 45 stays inside,
    by 75 falls outside.
    """
    wcs, position_offset, sky_bnds, ra_ccd, dec_ccd = bnds_data
    ncol, nrow = wcs.get_naxis()

    # (rows, cols, col_dither, row_dither) for the four CCD edges.
    edge_cases = [
        (np.linspace(0, nrow, (nrow + 64) // 64),
         np.zeros(nrow // 64 + 1), -offset, 0),
        (np.linspace(0, nrow, (nrow + 64) // 64),
         np.full(nrow // 64 + 1, ncol, dtype=float), offset, 0),
        (np.zeros(ncol // 64 + 1),
         np.linspace(0, ncol, (ncol + 64) // 64), 0, -offset),
        (np.full(ncol // 64 + 1, nrow, dtype=float),
         np.linspace(0, ncol, (ncol + 64) // 64), 0, offset),
    ]
    for rows, cols, col_dither, row_dither in edge_cases:
        ra, dec = wcs.image2sky(
            x=cols + position_offset + col_dither,
            y=rows + position_offset + row_dither)
        u, v = radec_to_uv(ra, dec, ra_ccd, dec_ccd)
        inside = sky_bnds.contains_points(u, v)
        if outside:
            assert np.all(~inside)
        else:
            assert np.all(inside)
|
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

import helper_methods

# --- CLI arguments ---------------------------------------------------------
if len(sys.argv) != 4:
    print("Usage: <input_file> <output_dir> <output_name>")
    sys.exit(1)  # sys.exit instead of the interactive-only exit() builtin
INPUT_CSV = sys.argv[1]
OUTPUT_DIR = sys.argv[2]
OUTPUT_NAME = sys.argv[3]
OUTPUT_TYPE = "png"

# Load benchmark data and put frames in chronological order
data = pd.read_csv(INPUT_CSV)
data = data.sort_values(by=["frame"], ascending=True)

# Ensure output directory exists once, before the plotting loop
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

# One figure (line chart + stats table) per primitive type
for primitive in data['primitive'].unique():
    fig, ax = plt.subplots()

    # Rows belonging to this primitive only
    rows = data[data["primitive"] == primitive]

    # Frame times converted from nanoseconds to milliseconds
    frame_times = [helper_methods.ns_to_ms(t, rounding=False)
                   for t in rows['frame_time'].values]

    # Summary statistics for the table beneath the chart
    decimals = 4
    min_frametime = round(np.amin(frame_times), decimals)
    max_frametime = round(np.amax(frame_times), decimals)
    med_frametime = round(np.median(frame_times), decimals)
    mean_frametime = round(np.mean(frame_times), decimals)
    std_dev_frametime = round(np.std(frame_times), decimals)

    # Plot frame times with a dashed marker at the fastest frame
    ax.plot(rows["frame"].unique(), frame_times,
            linewidth=3, alpha=0.5, color="grey")
    ax.axhline(y=min_frametime, color='blue', linestyle='--')

    # Dress plot
    ax.set_xlabel("Frame")
    ax.set_ylabel("Total time (ms)")
    ax.set_title(f"Continuous frame-times of {primitive}, naive")
    ax.yaxis.grid()
    plt.tight_layout()

    # Statistics table
    table_vals = [['Domain (y)', f"{{ y | {min_frametime} ≤ y ≤ {max_frametime} }} ms"],
                  ['Median (ỹ)', f"{med_frametime} ms"],
                  ['Mean (ȳ)', f"{mean_frametime} ms"],
                  ['Std. Deviation (σ)', f"{std_dev_frametime} ms"]]
    space = 0.35  # fraction of the figure height reserved for the table
    fig.subplots_adjust(bottom=space)
    ax.table(cellText=table_vals,
             colWidths=[0.4, 0.6],
             bbox=[0.1, -space * 1.6, 0.8, space])

    # Save and close the figure (closing avoids accumulating open figures
    # across loop iterations)
    fig.savefig(
        f"{OUTPUT_DIR}/{OUTPUT_NAME}_{primitive}.{OUTPUT_TYPE}", dpi=500)
    plt.close(fig)
|
# fmt: off
import logging
import os
import pprint
from pathlib import Path
from farm.data_handler.data_silo import DataSilo
from farm.data_handler.processor import SquadProcessor
from farm.data_handler.utils import write_squad_predictions
from farm.infer import QAInferencer
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.optimization import initialize_optimizer
from farm.modeling.prediction_head import QuestionAnsweringHead
from farm.modeling.tokenization import Tokenizer
from farm.train import Trainer
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings
def question_answering():
    """Train an extractive QA model (roberta-base) on SQuAD 2.0 with FARM.

    Side effects: logs metrics to deepset's public MLflow server, reads the
    SQuAD files from ../data/squad20, saves the trained model and processor
    to ../saved_models/bert-english-qa-tutorial, and writes SQuAD-format
    predictions for the dev set to predictions.json.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # Track the run on deepset's public MLflow server
    ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
    ml_logger.init_experiment(experiment_name="Public_FARM", run_name="Run_question_answering")

    ##########################
    ########## Settings
    ##########################
    set_all_seeds(seed=42)
    device, n_gpu = initialize_device_settings(use_cuda=True)
    batch_size = 24
    n_epochs = 2
    evaluate_every = 2000  # dev-set evaluation cadence, in batches
    lang_model = "roberta-base"
    do_lower_case = False  # roberta is a cased model
    train_filename = "train-v2.0.json"
    dev_filename = "dev-v2.0.json"

    # 1.Create a tokenizer
    tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=lang_model, do_lower_case=do_lower_case
    )

    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    # labels are the per-token answer-span boundaries
    label_list = ["start_token", "end_token"]
    metric = "squad"
    processor = SquadProcessor(
        tokenizer=tokenizer,
        max_seq_len=384,
        label_list=label_list,
        metric=metric,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=None,
        data_dir=Path("../data/squad20"),
    )

    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
    # NOTE: In FARM, the dev set metrics differ from test set metrics in that they are calculated on a token level instead of a word level
    data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=False)

    # 4. Create an AdaptiveModel
    # a) which consists of a pretrained language model as a basis
    language_model = LanguageModel.load(lang_model)
    # b) and a prediction head on top that is suited for our task => Question Answering
    prediction_head = QuestionAnsweringHead()

    model = AdaptiveModel(
        language_model=language_model,
        prediction_heads=[prediction_head],
        embeds_dropout_prob=0.1,
        lm_output_types=["per_token"],
        device=device,
    )

    # 5. Create an optimizer
    model, optimizer, lr_schedule = initialize_optimizer(
        model=model,
        learning_rate=3e-5,
        schedule_opts={"name": "LinearWarmup", "warmup_proportion": 0.2},
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs,
        device=device
    )

    # 6. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_silo=data_silo,
        epochs=n_epochs,
        n_gpu=n_gpu,
        lr_schedule=lr_schedule,
        evaluate_every=evaluate_every,
        device=device,
    )

    # 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
    trainer.train()

    # 8. Hooray! You have a model. Store it:
    save_dir = Path("../saved_models/bert-english-qa-tutorial")
    model.save(save_dir)
    processor.save(save_dir)

    # 9. Load it & harvest your fruits (Inference)
    QA_input = [
        {
            "qas": ["Who counted the game among the best ever made?"],
            "context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created."
        }]
    model = QAInferencer.load(save_dir, batch_size=40, gpu=True)
    result = model.inference_from_dicts(dicts=QA_input)[0]
    pprint.pprint(result)

    # 10. Do Inference on whole SQuAD Dataset & write the predictions file to disk
    filename = os.path.join(processor.data_dir, processor.dev_filename)
    result = model.inference_from_file(file=filename, return_json=False)
    result_squad = [x.to_squad_eval() for x in result]
    write_squad_predictions(
        predictions=result_squad,
        predictions_filename=filename,
        out_filename="predictions.json"
    )

    # 11. Get final evaluation metric using the official SQuAD evaluation script
    # To evaluate the model's performance on the SQuAD dev set, run the official squad eval script
    # (farm/squad_evaluation.py) in the command line with something like the command below.
    # This is necessary since the FARM evaluation during training is done on the token level.
    # This script performs word level evaluation and will generate metrics that are comparable
    # to the SQuAD leaderboard and most other frameworks:
    # python squad_evaluation.py path/to/squad20/dev-v2.0.json path/to/predictions.json


if __name__ == "__main__":
    question_answering()
|
# Generated by Django 3.1.8 on 2021-04-12 03:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Article.rating`` to a CharField restricted to the string
    choices '1'-'5' (default '5', max_length=2)."""

    dependencies = [
        # must apply after the previous rest_app migration
        ('rest_app', '0004_auto_20210121_0300'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='rating',
            field=models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5')], default='5', max_length=2),
        ),
    ]
|
""" OSXMetaData class to read and write various Mac OS X metadata
such as tags/keywords and Finder comments from files """
import base64
import datetime
import json
import logging
import os.path
import pathlib
import plistlib
# plistlib creates constants at runtime which causes pylint to complain
from plistlib import FMT_BINARY # pylint: disable=E0611
import applescript
import xattr
from ._version import __version__
from .attributes import ATTRIBUTES, Attribute, validate_attribute_value
from .classes import _AttributeOSXPhotosDetectedText, _AttributeList, _AttributeTagsList
from .constants import (
_FINDER_COMMENT_NAMES,
FINDER_COLOR_NONE,
FinderInfo,
_kMDItemUserTags,
)
from .datetime_utils import (
datetime_naive_to_utc,
datetime_remove_tz,
datetime_utc_to_local,
)
from .debug import _debug, _get_logger, _set_debug
from .findertags import Tag, get_tag_color_name
# TODO: What to do about colors
# TODO: check what happens if OSXMetaData.__init__ called with invalid file--should result in error but saw one case where it didn't
# TODO: cleartags does not always clear colors--this is a new behavior, did Mac OS change something in implementation of colors?
# Attribute-name groupings that drive the special-case handling in
# OSXMetaData (com.apple.FinderInfo is a packed binary attribute, so its
# sub-fields cannot be set/cleared independently of each other).

# all attributes that are part of com.apple.FinderInfo
_FINDERINFO_ATTRIBUTES = ["finderinfo", "findercolor", "stationarypad"]

# all attributes that use an Attribute class (defined in attributes.py)
_ATTRIBUTE_CLASS_ATTRIBUTES = [
    "tags",
    "osxphotos_detected_text",
    *_FINDERINFO_ATTRIBUTES,
]

# all attributes related to stationary pad
_FINDERINFO_STATIONARYPAD_ATTRIBUTES = ["finderinfo", "stationarypad"]

# all attributes related to Finder color
_FINDERINFO_COLOR_ATTRIBUTES = ["finderinfo", "findercolor"]

# all attributes that are included in finderinfo
_FINDERINFO_SUB_ATTRIBUTES = ["findercolor", "stationarypad"]
# AppleScript for manipulating Finder comments
_scpt_set_finder_comment = applescript.AppleScript(
"""
on run {path, fc}
set thePath to path
set theComment to fc
tell application "Finder" to set comment of (POSIX file thePath as alias) to theComment
end run
"""
)
_scpt_clear_finder_comment = applescript.AppleScript(
"""
on run {path}
set thePath to path
set theComment to missing value
tell application "Finder" to set comment of (POSIX file thePath as alias) to theComment
end run
"""
)
class OSXMetaData:
    """Create an OSXMetaData object to access file metadata.

    Metadata is stored in the file's extended attributes (xattr); most
    values are serialized as binary plists in the com.apple.metadata
    namespace.  Finder comments additionally require AppleScript so the
    Finder picks up the change."""

    # __slots__ makes the set of legal instance attributes explicit, which
    # matters because __setattr__ routes metadata names through set_attribute
    __slots__ = [
        "__init",
        "_attrs",
        "_fname",
        "_posix_name",
        "_tz_aware",
        "authors",
        "comment",
        "copyright",
        "creator",
        "description",
        "downloadeddate",
        "duedate",
        "findercolor",
        "findercomment",
        "finderinfo",
        "headline",
        "keywords",
        "participants",
        "projects",
        "rating",
        "stationary",
        "stationarypad",
        "tags",
        "version",
        "wherefroms",
        "osxphotos_detected_text",
    ]

    def __init__(self, fname, tz_aware=False):
        """Create an OSXMetaData object to access file metadata

        fname: filename to operate on
        tz_aware: bool; if True, date/time attributes will return
            timezone aware datetime.datetime attributes; if False (default)
            date/time attributes will return timezone naive objects

        Raises:
            FileNotFoundError: if fname does not exist"""
        self._fname = pathlib.Path(fname)
        self._posix_name = self._fname.resolve().as_posix()
        self._tz_aware = tz_aware

        if not self._fname.exists():
            # fix: raise with a single formatted message instead of the
            # original 2-tuple ("file does not exist: ", fname)
            raise FileNotFoundError(f"file does not exist: {fname}")

        self._attrs = xattr.xattr(self._fname)

        # create property classes for the multi-valued attributes
        # tags get special handling due to color labels
        # ATTRIBUTES contains both long and short names, want only the short names (attribute.name)
        for name in {attribute.name for attribute in ATTRIBUTES.values()}:
            attribute = ATTRIBUTES[name]
            if attribute.class_ not in [str, float, int, bool, datetime.datetime]:
                super().__setattr__(
                    name, attribute.class_(attribute, self._attrs, self)
                )

        # Done with initialization; from here on __setattr__ treats known
        # metadata names as requests to write metadata (see __setattr__)
        self.__init = True

    @property
    def name(self):
        """POSIX path of the file OSXMetaData is operating on"""
        return self._fname.resolve().as_posix()

    @property
    def tz_aware(self):
        """returns the timezone aware flag"""
        return self._tz_aware

    @tz_aware.setter
    def tz_aware(self, tz_flag):
        """sets the timezone aware flag
        tz_flag: bool"""
        self._tz_aware = tz_flag

    def asdict(self, all_=False, encode=True):
        """Return dict with all attributes for this file

        Args:
            all_: bool, if True, returns all attributes including those that osxmetadata knows nothing about
            encode: bool, if True, encodes values for unknown attributes with base64, otherwise leaves the values as raw bytes

        Returns:
            dict with attributes for this file (plus _version/_filepath/_filename bookkeeping keys)
        """
        attribute_list = self._list_attributes() if all_ else self.list_metadata()
        dict_data = {
            "_version": __version__,
            "_filepath": self._posix_name,
            "_filename": self._fname.name,
        }
        for attr in attribute_list:
            try:
                attribute = ATTRIBUTES[attr]
                if attribute.name == "tags":
                    # serialize tags as [name, color] pairs
                    tags = self.get_attribute(attribute.name)
                    value = [[tag.name, tag.color] for tag in tags]
                    dict_data[attribute.constant] = value
                elif attribute.constant == FinderInfo:
                    value = self.finderinfo.asdict()
                    dict_data[attribute.constant] = value
                elif attribute.type_ == datetime.datetime:
                    # need to convert datetime.datetime to string to serialize
                    value = self.get_attribute(attribute.name)
                    if isinstance(value, list):
                        value = [v.isoformat() for v in value]
                    else:
                        value = value.isoformat()
                    dict_data[attribute.constant] = value
                else:
                    # get raw value
                    dict_data[attribute.constant] = self.get_attribute(attribute.name)
            except KeyError:
                # an attribute osxmetadata doesn't know about
                if all_:
                    try:
                        value = self._attrs[attr]
                        # convert value to base64 encoded ascii
                        if encode:
                            value = base64.b64encode(value).decode("ascii")
                        dict_data[attr] = value
                    except KeyError:
                        # value disappeared between call to _list_attributes and now
                        pass
        return dict_data

    def to_json(self, all_=False):
        """Returns a string in JSON format for all attributes in this file

        Args:
            all_: bool; if True, also includes attributes not known to osxmetadata (generated with asdict(all_=True, encode=True) )
        """
        dict_data = self.asdict(all_=all_)
        return json.dumps(dict_data)

    def _restore_attributes(self, attr_dict, all_=False):
        """restore attributes from attr_dict

        for each attribute in attr_dict, will set the attribute
        will not clear/erase any attributes on file that are not in attr_dict

        Args:
            attr_dict: an attribute dict as produced by OSXMetaData.asdict()
            all_: bool; if True, also restores attributes not known to osxmetadata (generated with asdict(all_=True, encode=True) )
        """
        for key, val in attr_dict.items():
            if key.startswith("_"):
                # skip private keys like _version and _filepath
                continue
            try:
                if key == _kMDItemUserTags:
                    if not isinstance(val, list):
                        raise TypeError(
                            f"expected list for attribute {key} but got {type(val)}"
                        )
                    self.set_attribute(key, [Tag(*tag_val) for tag_val in val])
                elif key == FinderInfo:
                    if not isinstance(val, dict):
                        raise TypeError(
                            f"expected dict for attribute {key} but got {type(val)}"
                        )
                    self.set_attribute(key, val)
                elif key in ATTRIBUTES:
                    self.set_attribute(key, val)
                elif all_:
                    # unknown attribute: value was base64-encoded by asdict()
                    self._attrs.set(key, base64.b64decode(val))
            except Exception as e:
                # restoration is best-effort: log and continue with remaining keys
                logging.warning(
                    f"Unable to restore attribute {key} for {self._fname}: {e}"
                )

    def get_attribute(self, attribute_name):
        """load attribute and return value or None if attribute was not set

        attribute_name: name of attribute
        """
        attribute = ATTRIBUTES[attribute_name]

        # user tags and finderinfo need special processing
        if attribute.name in _ATTRIBUTE_CLASS_ATTRIBUTES:
            return getattr(self, attribute.name).get_value()

        # must be a "normal" metadata attribute
        try:
            plist = plistlib.loads(self._attrs[attribute.constant])
        except KeyError:
            plist = None

        # add UTC to any datetime.datetime objects because that's how MacOS stores them
        # In the plist associated with extended metadata attributes, times are stored as:
        # <date>2020-04-14T14:49:22Z</date>
        if plist and isinstance(plist, list):
            if isinstance(plist[0], datetime.datetime):
                plist = [datetime_naive_to_utc(d) for d in plist]
                if not self._tz_aware:
                    # want datetimes in naive format
                    plist = [
                        datetime_remove_tz(d_local)
                        for d_local in [datetime_utc_to_local(d_utc) for d_utc in plist]
                    ]
        elif isinstance(plist, datetime.datetime):
            plist = datetime_naive_to_utc(plist)
            if not self._tz_aware:
                # want datetimes in naive format
                plist = datetime_remove_tz(datetime_utc_to_local(plist))

        if attribute.as_list and isinstance(plist, list):
            # attribute is stored as a list but exposed as a single value;
            # guard against an empty stored list (treat as unset)
            return plist[0] if plist else None
        else:
            return plist

    def get_attribute_str(self, attribute_name):
        """returns a string representation of attribute value
        e.g. if attribute is a datetime.datetime object, will
        format using datetime.isoformat()

        attribute_name: name of attribute

        Note: for multi-valued attributes with no special formatting this
        returns a list of strings (one per value), not a single string."""
        attribute = ATTRIBUTES[attribute_name]
        value = self.get_attribute(attribute_name)
        if attribute.class_ == _AttributeOSXPhotosDetectedText:
            return json.dumps(value)
        if type(value) in (list, set):
            # fix: copy to an indexable list first -- the previous code
            # indexed value[0] directly, which raises TypeError for a set
            values = list(value)
            if values:
                if isinstance(values[0], datetime.datetime):
                    new_value = [v.isoformat() for v in values]
                    return str(new_value)
                elif isinstance(values[0], Tag):
                    new_value = [
                        f"{tag.name},{get_tag_color_name(tag.color)}"
                        if tag.color
                        else f"{tag.name}"
                        for tag in values
                    ]
                    return str(new_value)
            return [str(val) for val in values]
        else:
            if isinstance(value, datetime.datetime):
                return value.isoformat()
            elif isinstance(value, Tag):
                return (
                    f"{value.name},{get_tag_color_name(value.color)}"
                    if value.color
                    else f"{value.name}"
                )
            return str(value)

    def set_attribute(self, attribute_name, value):
        """write attribute to file with value

        attribute_name: an osxmetadata Attribute name
        value: value to store in attribute"""
        attribute = ATTRIBUTES[attribute_name]

        # user tags and Finder info need special processing
        if attribute.name in _ATTRIBUTE_CLASS_ATTRIBUTES:
            return getattr(self, attribute.name).set_value(value)

        # verify type is correct
        value = validate_attribute_value(attribute, value)

        if attribute.name in _FINDER_COMMENT_NAMES:
            # Finder Comment needs special handling
            # code following will also set the attribute for Finder Comment
            self.set_finder_comment(self._posix_name, value)
        elif attribute.class_ in [
            _AttributeList,
            _AttributeTagsList,
        ]:
            getattr(self, attribute.name).set_value(value)
        else:
            # must be a normal scalar (e.g. str, float)
            plist = plistlib.dumps(value, fmt=FMT_BINARY)
            self._attrs.set(attribute.constant, plist)

    def update_attribute(self, attribute_name, value):
        """Update attribute with union of itself and value
        (this avoids adding duplicate values to attribute)

        attribute_name: an osxmetadata Attribute name
        value: value to append to attribute"""
        return self.append_attribute(attribute_name, value, update=True)

    def append_attribute(self, attribute_name, value, update=False):
        """append value to attribute

        attribute_name: an osxmetadata Attribute name
        value: value to append to attribute
        update: (bool) if True, update instead of append (e.g. avoid adding duplicates)
            (default is False)"""
        attribute = ATTRIBUTES[attribute_name]

        # start with existing values
        new_value = self.get_attribute(attribute.name)

        # user tags need special processing to normalize names
        if attribute.name == "tags":
            if not isinstance(new_value, list) or not isinstance(value, list):
                raise TypeError(
                    f"tags expects values in list: {type(new_value)}, {type(value)}"
                )
            if update:
                # verify not already in values
                for val in value:
                    if val not in new_value:
                        new_value.append(val)
            else:
                new_value.extend(value)
            return self.tags.set_value(new_value)

        if attribute.name in _FINDERINFO_ATTRIBUTES:
            raise ValueError(f"cannot append or update {attribute.name}")

        value = validate_attribute_value(attribute, value)

        # simplified (provably equivalent) form of the original mixed and/or
        # condition: a previously-unset list-like attribute starts from value
        if new_value is None and (
            attribute.class_ == _AttributeOSXPhotosDetectedText or attribute.list
        ):
            new_value = value
        elif attribute.list:
            new_value = list(new_value)
            if update:
                # avoid duplicates
                for val in value:
                    if val not in new_value:
                        new_value.append(val)
            elif attribute.class_ == _AttributeOSXPhotosDetectedText:
                for val in value:
                    new_value.append(val)
            else:
                new_value.extend(value)
        else:
            # scalar value
            if update:
                raise AttributeError(f"Cannot use update on {attribute.type_}")
            if new_value is None:
                new_value = value
            else:
                new_value += value

        if attribute.name in _FINDER_COMMENT_NAMES:
            # Finder Comment needs special handling
            self.set_finder_comment(self._posix_name, new_value)
        elif attribute.class_ in [
            _AttributeList,
            _AttributeTagsList,
            _AttributeOSXPhotosDetectedText,
        ]:
            # if tags, set_value will normalize
            getattr(self, attribute.name).set_value(new_value)
        else:
            plist = plistlib.dumps(new_value, fmt=FMT_BINARY)
            self._attrs.set(attribute.constant, plist)
        return new_value

    def remove_attribute(self, attribute_name, value):
        """remove a value from attribute, raise ValueError if attribute does not contain value
        only applies to multi-valued attributes, otherwise raises TypeError

        attribute_name: name of OSXMetaData attribute"""
        attribute = ATTRIBUTES[attribute_name]

        if not attribute.list:
            raise TypeError("remove only applies to multi-valued attributes")

        if attribute.name == "tags":
            # tags need special processing
            self.tags.remove(value)
        else:
            values = self.get_attribute(attribute.name)
            values = list(values)
            values.remove(value)
            self.set_attribute(attribute.name, values)

    def discard_attribute(self, attribute_name, value):
        """remove a value from attribute, unlike remove, does not raise exception
        if attribute does not contain value
        only applies to multi-valued attributes, otherwise raises TypeError

        attribute_name: name of OSXMetaData attribute"""
        attribute = ATTRIBUTES[attribute_name]

        if not attribute.list:
            raise TypeError("discard only applies to multi-valued attributes")

        values = self.get_attribute(attribute.name)
        try:
            values.remove(value)
            self.set_attribute(attribute.name, values)
        except Exception:
            # best-effort: silently ignore a missing value (by contract)
            pass

    def clear_attribute(self, attribute_name):
        """clear attribute (remove it from the file)

        attribute_name: name of OSXMetaData attribute"""
        attribute = ATTRIBUTES[attribute_name]
        try:
            if attribute.name in _FINDER_COMMENT_NAMES:
                # Finder Comment needs special handling
                # code following will also clear the attribute for Finder Comment
                self.clear_finder_comment(self._posix_name)
            if attribute.name in ["tags", *_FINDERINFO_COLOR_ATTRIBUTES]:
                # don't clear the entire FinderInfo attribute, just delete the bits we know about
                self.finderinfo.set_finderinfo_color(FINDER_COLOR_NONE)
            if attribute.name in _FINDERINFO_STATIONARYPAD_ATTRIBUTES:
                self.finderinfo.set_finderinfo_stationarypad(False)
            if attribute.name not in _FINDERINFO_ATTRIBUTES:
                # remove the entire attribute
                self._attrs.remove(attribute.constant)
        except (IOError, OSError):
            # TODO: fix this try/except handling
            pass

    def _list_attributes(self):
        """list all the attributes set on the file"""
        return self._attrs.list()

    def list_metadata(self):
        """list the Apple metadata attributes set on the file:
        e.g. those in com.apple.metadata namespace"""
        # also lists com.osxmetadata.test used for debugging
        mdlist = self._list_attributes()
        mdlist = [
            md
            for md in mdlist
            if md.startswith("com.apple.metadata")
            or md.startswith("com.apple.FinderInfo")
            or md.startswith("com.osxmetadata.test")
            or md.startswith("osxphotos.metadata")
        ]
        return mdlist

    def set_finder_comment(self, path, comment):
        """set finder comment for object path (file or directory)

        path: path to file or directory in posix format
        comment: comment string; a falsy comment (e.g. "") clears the comment
        """
        if not os.path.exists(path):
            raise FileNotFoundError(f"Could not find {path}")

        if comment:
            # Finder must be told via AppleScript; the xattr alone is not
            # picked up by Finder, so both are written
            _scpt_set_finder_comment.run(path, comment)
            plist = plistlib.dumps(comment, fmt=FMT_BINARY)
            self._attrs.set(ATTRIBUTES["findercomment"].constant, plist)
        else:
            self.clear_finder_comment(path)

    def clear_finder_comment(self, path):
        """clear finder comment for object path (file or directory)

        path: path to file or directory in posix format
        """
        if not os.path.exists(path):
            raise FileNotFoundError(f"Could not find {path}")

        _scpt_clear_finder_comment.run(path)
        try:
            self._attrs.remove(ATTRIBUTES["findercomment"].constant)
        except (IOError, OSError):
            # exception raised if attribute not found and attempt to remove it
            pass

    def __getattr__(self, name):
        """if attribute name is in ATTRIBUTE dict, return the value"""
        if name in ATTRIBUTES:
            return self.get_attribute(name)
        raise AttributeError(f"{name} is not an attribute")

    def __setattr__(self, name, value):
        """if object is initialized and name is an attribute in ATTRIBUTES,
        set the attribute to value
        if value is None, will clear (delete) the attribute and all associated values
        if name is not a metadata attribute, assume it's a normal class attribute and pass to
        super() to handle"""
        try:
            if self.__init:
                # already initialized
                attribute = ATTRIBUTES[name]
                if value is None:
                    self.clear_attribute(attribute.name)
                else:
                    self.set_attribute(attribute.name, value)
        except (KeyError, AttributeError):
            # KeyError: name is not a metadata attribute;
            # AttributeError: __init not yet set (still constructing);
            # either way, fall through to a normal attribute assignment
            super().__setattr__(name, value)
|
import torch
from data import IntentDset
from model import ProtNet
from torch import nn, optim
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
import math
import argparse
# Command-line options for the prototypical-network training run
parser = argparse.ArgumentParser()
parser.add_argument('--train_data', default='lena', type=str)
parser.add_argument('--dev_data', default='moli', type=str)
# numeric defaults are given as real ints (the originals were strings like
# '10'; argparse happens to parse string defaults through `type`, but int
# defaults are explicit and not implementation-dependent)
parser.add_argument('--seq_len', default=10, type=int)
parser.add_argument('--train_batch_size', default=10, type=int)
parser.add_argument('--dev_batch_size', default=10, type=int)
opt = parser.parse_args()
# https://github.com/cyvius96/prototypical-network-pytorch/blob/master/utils.py
def euclidean_metric(a, b):
    """Return pairwise negative squared Euclidean distances.

    a: tensor of shape (n, d); b: tensor of shape (m, d).
    Returns an (n, m) tensor where entry [i, j] = -||a[i] - b[j]||^2,
    so larger values mean "closer" (usable directly as logits).
    """
    n_a, n_b = a.shape[0], b.shape[0]
    # broadcast both to (n, m, d) and reduce over the feature dimension
    expanded_a = a.unsqueeze(1).expand(n_a, n_b, -1)
    expanded_b = b.unsqueeze(0).expand(n_a, n_b, -1)
    return -((expanded_a - expanded_b) ** 2).sum(dim=2)
# Episode configuration: N_c = classes per episode, N_i = support examples
# per class, N_q = query examples per class (tr = train, te = test/dev).
N_c_tr = 57
N_c_te = 297
if opt.train_data == 'lena':
    # 'lena' split uses 57 classes for training, 297 held out for eval
    N_c_tr = 57
    N_c_te = 297
elif opt.train_data == 'moli':
    # swapped when training on the 'moli' split
    N_c_tr = 297
    N_c_te = 57
N_i_tr = 1
N_i_te = 1
N_q_tr = 1
N_q_te = 1

# Few-shot intent datasets for training and validation (see data.IntentDset)
idset = IntentDset(dataset='data', split=opt.train_data, Nc=N_c_tr, Ni=N_i_tr, n_query=N_q_tr, seq_len=opt.seq_len)
val_dset = IntentDset(dataset='data', split=opt.dev_data, Nc=N_c_te, Ni=N_i_te, n_query=N_q_te, seq_len=opt.seq_len)

# Prototypical network encoder (see model.ProtNet), wrapped for multi-GPU
pn = ProtNet().cuda()
pn = nn.DataParallel(pn)
pn = pn.cuda()

# BERT-style weight decay: no decay for biases and LayerNorm parameters
param_optimizer = list(pn.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=2e-5,
                     warmup=0.1,
                     t_total=10000)
criterion = nn.CrossEntropyLoss()

# Training progress trackers
step = 0
best_accuracy = 0
best_step = 0
# Main training loop: one pass over the training sentences per `step`,
# with an evaluation pass after each step.
while True:
    pn.train()
    step += 1
    train_batch_size = opt.train_batch_size
    batch_num = math.ceil(len(idset.sents) / train_batch_size)
    for i in range(batch_num):
        begin = i * train_batch_size
        end = min((i + 1) * train_batch_size, len(idset.sents))
        qry_batch = idset.sents[begin: end]
        query_sens = []
        query_label = []
        for j in range(begin, end):
            if idset.labels[j] not in idset.unqiue_labels:
                # NOTE(review): removing by index j % train_batch_size shifts
                # later elements of qry_batch, so a second removal in the same
                # batch may drop the wrong item -- confirm intended behavior
                qry_batch.remove(qry_batch[j % train_batch_size])
                continue
            # temporarily pull the query sentence out of its label bin so it
            # cannot be sampled into its own support set below
            idset.label_bins[idset.label2id[idset.labels[j]]].remove(idset.sents[j])
            query_label.append(idset.label2id[idset.labels[j]])
            query_sens.append(idset.sents[j])
        target_input_ids = torch.tensor([f.input_ids for f in qry_batch], dtype=torch.long)
        target_input_mask = torch.tensor([f.input_mask for f in qry_batch], dtype=torch.long)
        qry_set = {}
        qry_set['input_ids'] = target_input_ids
        qry_set['input_mask'] = target_input_mask
        batch, n_labels = idset.next_batch_test_full()
        sup_set = batch['sup_set_x']
        # put the query sentences back into their label bins
        for j in range(len(query_sens)):
            idset.label_bins[query_label[j]].append(query_sens[j])
        sup = pn(sup_set['input_ids'].cuda(), sup_set['input_mask'].cuda())
        qry = pn(qry_set['input_ids'].cuda(), qry_set['input_mask'].cuda())
        # class prototype = mean over the N_i_tr support embeddings per class
        sup = sup.view(n_labels, N_i_tr, -1).mean(1)
        # logits are negative squared distances to each prototype
        logits = euclidean_metric(qry, sup)
        label = torch.tensor(query_label).type(torch.LongTensor).cuda()
        loss = criterion(logits, label)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        print('Iteration :', step, "Loss :", float(loss.item()))
    # Evaluation: step % 1 == 0 is always true, so this runs after every step
    # NOTE(review): presumably a placeholder cadence -- confirm intent
    if step % 1 == 0:
        pn.eval()
        pn.cuda()
        total = 0
        correct = 0
        # NOTE(review): the inner loop below reuses the loop variable `i`
        # of this (single-iteration) outer loop
        for i in range(1):
            dev_batch_size = opt.dev_batch_size
            batch_num = math.ceil(len(val_dset.sents) / dev_batch_size)
            for i in range(batch_num):
                begin = i * dev_batch_size
                end = min((i + 1) * dev_batch_size, len(val_dset.sents))
                qry_batch = val_dset.sents[begin: end]
                query_sens = []
                query_label = []
                for j in range(begin, end):
                    if val_dset.labels[j] not in val_dset.unqiue_labels:
                        # NOTE(review): same index-shift concern as training
                        qry_batch.remove(qry_batch[j % dev_batch_size])
                        continue
                    val_dset.label_bins[val_dset.label2id[val_dset.labels[j]]].remove(val_dset.sents[j])
                    query_label.append(val_dset.label2id[val_dset.labels[j]])
                    query_sens.append(val_dset.sents[j])
                target_input_ids = torch.tensor([f.input_ids for f in qry_batch], dtype=torch.long)
                target_input_mask = torch.tensor([f.input_mask for f in qry_batch], dtype=torch.long)
                qry_set = {}
                qry_set['input_ids'] = target_input_ids
                qry_set['input_mask'] = target_input_mask
                batch, n_labels = val_dset.next_batch_test_full()
                sup_set = batch['sup_set_x']
                # restore the query sentences to their label bins
                for j in range(len(query_sens)):
                    val_dset.label_bins[query_label[j]].append(query_sens[j])
                sup = pn(sup_set['input_ids'].cuda(), sup_set['input_mask'].cuda())
                qry = pn(qry_set['input_ids'].cuda(), qry_set['input_mask'].cuda())
                sup = sup.view(n_labels, N_i_te, -1).mean(1)
                # predicted class = nearest prototype (argmax of neg. distance)
                logits = euclidean_metric(qry, sup).max(1)[1].cpu()
                label = torch.tensor(query_label).type(torch.LongTensor)
                correct += float(torch.sum(logits == label).item())
                total += label.shape[0]
        print(correct, '/', total)
        print('Accuracy :', correct / total)
        if (correct / total) > best_accuracy:
            best_accuracy = correct / total
            best_step = step
        print('Best accuracy :', best_accuracy)
        print('Best step :', best_step)
        pn.cuda()
    # stop after 100000 steps
    if step % 100000 == 0:
        break
|
import logging
import os
import subprocess
from pathlib import Path

log = logging.getLogger("iblrig")

IBLRIG_FOLDER = r"C:\iblrig"
CWD = os.getcwd()
BONSAI_FOLDER = Path(IBLRIG_FOLDER) / "Bonsai"

# Prefer the 64-bit Bonsai executable; fall back to the 32-bit one
bns = BONSAI_FOLDER / "Bonsai64.exe"
bns = str(bns) if bns.exists() else str(BONSAI_FOLDER / "Bonsai.exe")

certification_folder = Path(IBLRIG_FOLDER) / "visual_stim" / "ephys_certification"
wrkfl = str(certification_folder / "ephys_certification.bonsai")
SESSION_RAW_DATA_FOLDER = certification_folder

# Flags
noedit = "--no-editor"  # implies start and no-debug?
noboot = "--no-boot"
start = "--start"

# Workflow properties passed to the Bonsai certification workflow
SA0_DueTime = "-p:SpontaneousActivity0.DueTime=00:15:00"
ODS0_Count = "-p:OrientationDirectionSelectivityStim0.Count=20"
RFM_FileName = "-p:ReceptiveFieldMappingStim.FileNameRFMapStim=" + str(
    Path(SESSION_RAW_DATA_FOLDER) / "_iblrig_RFMapStim.raw.bin"
)
RFM_MappingTime = "-p:ReceptiveFieldMappingStim.MappingTime=00:10:00"
CRCS_CheckerboardTime = "-p:ContrastReversingCheckerboardStim.CheckerboardTime=00:03:00"
CSTS_StimFileName = "-p:ContrastSelectivityTaskStim.StimFileName=" + str(
    certification_folder / "Extensions" / "stims.csv"
)
SA1_DueTime = "-p:SpontaneousActivity1.DueTime=00:15:00"
ODS1_Count = "-p:OrientationDirectionSelectivityStim1.Count=20"

cmd = [
    bns,
    wrkfl,
    noboot,
    noedit,
    SA0_DueTime,
    SA1_DueTime,
    RFM_FileName,
    ODS0_Count,
    ODS1_Count,
    RFM_MappingTime,
    CRCS_CheckerboardTime,
    CSTS_StimFileName,
]

# Run Bonsai from the certification folder so workflow-relative paths
# resolve; always restore the original working directory, even when the
# subprocess call raises.
os.chdir(certification_folder)
try:
    subprocess.run(cmd, stdout=subprocess.PIPE)  # locking call
finally:
    os.chdir(CWD)
log.info("You're done, please remove the mouse.\n" * 42)
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Meta checkout dependency manager for Git."""
# Files
# .gclient : Current client configuration, written by 'config' command.
# Format is a Python script defining 'solutions', a list whose
# entries each are maps binding the strings "name" and "url"
# to strings specifying the name and location of the client
# module, as well as "custom_deps" to a map similar to the
# deps section of the DEPS file below, as well as
# "custom_hooks" to a list similar to the hooks sections of
# the DEPS file below.
# .gclient_entries : A cache constructed by 'update' command. Format is a
# Python script defining 'entries', a list of the names
# of all modules in the client
# <module>/DEPS : Python script defining var 'deps' as a map from each
# requisite submodule name to a URL where it can be found (via
# one SCM)
#
# Hooks
# .gclient and DEPS files may optionally contain a list named "hooks" to
# allow custom actions to be performed based on files that have changed in the
# working copy as a result of a "sync"/"update" or "revert" operation. This
# can be prevented by using --nohooks (hooks run by default). Hooks can also
# be forced to run with the "runhooks" operation. If "sync" is run with
# --force, all known but not suppressed hooks will run regardless of the state
# of the working copy.
#
# Each item in a "hooks" list is a dict, containing these two keys:
# "pattern" The associated value is a string containing a regular
# expression. When a file whose pathname matches the expression
# is checked out, updated, or reverted, the hook's "action" will
# run.
# "action" A list describing a command to run along with its arguments, if
# any. An action command will run at most one time per gclient
# invocation, regardless of how many files matched the pattern.
# The action is executed in the same directory as the .gclient
# file. If the first item in the list is the string "python",
# the current Python interpreter (sys.executable) will be used
# to run the command. If the list contains string
# "$matching_files" it will be removed from the list and the list
# will be extended by the list of matching files.
# "name" An optional string specifying the group to which a hook belongs
# for overriding and organizing.
#
# Example:
# hooks = [
# { "pattern": "\\.(gif|jpe?g|pr0n|png)$",
# "action": ["python", "image_indexer.py", "--all"]},
# { "pattern": ".",
# "name": "gyp",
# "action": ["python", "src/build/gyp_chromium"]},
# ]
#
# Pre-DEPS Hooks
# DEPS files may optionally contain a list named "pre_deps_hooks". These are
# the same as normal hooks, except that they run before the DEPS are
# processed. Pre-DEPS run with "sync" and "revert" unless the --noprehooks
# flag is used.
#
# Specifying a target OS
# An optional key named "target_os" may be added to a gclient file to specify
# one or more additional operating systems that should be considered when
# processing the deps_os/hooks_os dict of a DEPS file.
#
# Example:
# target_os = [ "android" ]
#
# If the "target_os_only" key is also present and true, then *only* the
# operating systems listed in "target_os" will be used.
#
# Example:
# target_os = [ "ios" ]
# target_os_only = True
#
# Specifying a target CPU
# To specify a target CPU, the variables target_cpu and target_cpu_only
# are available and are analagous to target_os and target_os_only.
from __future__ import print_function
__version__ = '0.7'
import collections
import copy
import json
import logging
import optparse
import os
import platform
import posixpath
import pprint
import re
import sys
import time
import urlparse
import detect_host_arch
import fix_encoding
import gclient_eval
import gclient_scm
import gclient_utils
import git_cache
import metrics
import metrics_utils
from third_party.repo.progress import Progress
import subcommand
import subprocess2
import setup_color
# Singleton object to represent an unset cache_dir (as opposed to a disabled
# one, e.g. if a spec explicitly says `cache_dir = None`.)
# A unique sentinel; presumably tested by identity elsewhere — it compares
# unequal to every other value, including None.
UNSET_CACHE_DIR = object()
class GNException(Exception):
  """Raised when a Python value cannot be represented in GN syntax."""
  pass
def ToGNString(value, allow_dicts=True):
  """Returns a stringified GN equivalent of the Python value.

  allow_dicts indicates if this function will allow converting dictionaries
  to GN scopes. This is only possible at the top level, you can't nest a
  GN scope in a list, so this should be set to False for recursive calls."""
  if isinstance(value, basestring):
    if value.find('\n') >= 0:
      raise GNException("Trying to print a string with a newline in it.")
    # Escape backslashes, double quotes and '$' (GN's expansion character).
    escaped = value.replace('\\', '\\\\').replace('"', '\\"').replace('$', '\\$')
    return '"%s"' % escaped

  if isinstance(value, unicode):
    # Python 2: recurse on the UTF-8 encoded byte string.
    return ToGNString(value.encode('utf-8'))

  if isinstance(value, bool):
    return "true" if value else "false"

  # NOTE: some type handling removed compared to chromium/src copy.
  raise GNException("Unsupported type when printing to GN.")
class Hook(object):
  """Descriptor of command ran before/after sync or on demand."""

  def __init__(self, action, pattern=None, name=None, cwd=None, condition=None,
               variables=None, verbose=False, cwd_base=None):
    """Constructor.

    Arguments:
      action (list of basestring): argv of the command to run
      pattern (basestring regex): noop with git; deprecated
      name (basestring): optional name; no effect on operation
      cwd (basestring): working directory to use
      condition (basestring): condition when to run the hook
      variables (dict): variables for evaluating the condition
      verbose (bool): always print the command header (see run())
      cwd_base (basestring): base directory that cwd is joined onto
    """
    # freeze() makes the argv immutable after parsing.
    self._action = gclient_utils.freeze(action)
    self._pattern = pattern
    self._name = name
    self._cwd = cwd
    self._condition = condition
    self._variables = variables
    self._verbose = verbose
    self._cwd_base = cwd_base

  @staticmethod
  def from_dict(d, variables=None, verbose=False, conditions=None,
                cwd_base=None):
    """Creates a Hook instance from a dict like in the DEPS file."""
    # Merge any local and inherited conditions.
    gclient_eval.UpdateCondition(d, 'and', conditions)
    return Hook(
        d['action'],
        d.get('pattern'),
        d.get('name'),
        d.get('cwd'),
        d.get('condition'),
        variables=variables,
        # Always print the header if not printing to a TTY.
        verbose=verbose or not setup_color.IS_TTY,
        cwd_base=cwd_base)

  @property
  def action(self):
    """Immutable argv of the hook command."""
    return self._action

  @property
  def pattern(self):
    """Deprecated file-matching regex; a no-op with git."""
    return self._pattern

  @property
  def name(self):
    """Optional hook name (stored but not otherwise used by this class)."""
    return self._name

  @property
  def condition(self):
    """Condition string controlling whether run() executes (may be None)."""
    return self._condition

  @property
  def effective_cwd(self):
    """Directory the hook runs in: cwd_base, joined with cwd when set."""
    cwd = self._cwd_base
    if self._cwd:
      cwd = os.path.join(cwd, self._cwd)
    return cwd

  def matches(self, file_list):
    """Returns true if the pattern matches any of files in the list."""
    # No pattern means "match everything".
    if not self._pattern:
      return True
    pattern = re.compile(self._pattern)
    return bool([f for f in file_list if pattern.search(f)])

  def run(self):
    """Executes the hook's command (provided the condition is met)."""
    if (self._condition and
        not gclient_eval.EvaluateCondition(self._condition, self._variables)):
      return

    # Copy to a plain list so the frozen action is not mutated below.
    cmd = [arg for arg in self._action]

    if cmd[0] == 'python':
      # If the hook specified "python" as the first item, the action is a
      # Python script. Run it by starting a new copy of the same interpreter.
      cmd[0] = sys.executable
    elif cmd[0] == 'vpython' and _detect_host_os() == 'win':
      # On Windows the vpython launcher is a batch file.
      cmd[0] += '.bat'

    try:
      start_time = time.time()
      gclient_utils.CheckCallAndFilterAndHeader(
          cmd, cwd=self.effective_cwd, always=self._verbose)
    except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
      # Use a discrete exit status code of 2 to indicate that a hook action
      # failed. Users of this script may wish to treat hook action failures
      # differently from VC failures.
      print('Error: %s' % str(e), file=sys.stderr)
      sys.exit(2)
    finally:
      elapsed_time = time.time() - start_time
      if elapsed_time > 10:
        print("Hook '%s' took %.2f secs" % (
            gclient_utils.CommandToStr(cmd), elapsed_time))
class DependencySettings(object):
  """Immutable configuration settings."""
  def __init__(
      self, parent, url, managed, custom_deps, custom_vars,
      custom_hooks, deps_file, should_process, relative, condition):
    # These are not mutable:
    self._parent = parent
    self._deps_file = deps_file
    self._url = url
    # The condition as string (or None). Useful to keep e.g. for flatten.
    self._condition = condition
    # 'managed' determines whether or not this dependency is synced/updated by
    # gclient after gclient checks it out initially. The difference between
    # 'managed' and 'should_process' is that the user specifies 'managed' via
    # the --unmanaged command-line flag or a .gclient config, where
    # 'should_process' is dynamically set by gclient if it goes over its
    # recursion limit and controls gclient's behavior so it does not misbehave.
    self._managed = managed
    self._should_process = should_process
    # If this is a recursed-upon sub-dependency, and the parent has
    # use_relative_paths set, then this dependency should check out its own
    # dependencies relative to that parent's path for this, rather than
    # relative to the .gclient file.
    self._relative = relative
    # This is a mutable value which has the list of 'target_os' OSes listed in
    # the current deps file.
    self.local_target_os = None

    # These are only set in .gclient and not in DEPS files.
    self._custom_vars = custom_vars or {}
    self._custom_deps = custom_deps or {}
    self._custom_hooks = custom_hooks or []

    # Post process the url to remove trailing slashes.
    if isinstance(self.url, basestring):
      # urls are sometime incorrectly written as proto://host/path/@rev. Replace
      # it to proto://host/path@rev.
      self.set_url(self.url.replace('/@', '@'))
    elif not isinstance(self.url, (None.__class__)):
      raise gclient_utils.Error(
          ('dependency url must be either string or None, '
           'instead of %s') % self.url.__class__.__name__)

    # Make any deps_file path platform-appropriate.
    if self._deps_file:
      for sep in ['/', '\\']:
        self._deps_file = self._deps_file.replace(sep, os.sep)

  @property
  def deps_file(self):
    """Relative path of the DEPS file for this dependency (OS separators)."""
    return self._deps_file

  @property
  def managed(self):
    """Whether gclient keeps this dependency synced after initial checkout."""
    return self._managed

  @property
  def parent(self):
    """Parent DependencySettings node, or None for the root."""
    return self._parent

  @property
  def root(self):
    """Returns the root node, a GClient object."""
    if not self.parent:
      # This line is to signal pylint that it could be a GClient instance.
      return self or GClient(None, None)
    return self.parent.root

  @property
  def should_process(self):
    """True if this dependency should be processed, i.e. checked out."""
    return self._should_process

  @property
  def custom_vars(self):
    # Returns a copy so callers cannot mutate the stored dict.
    return self._custom_vars.copy()

  @property
  def custom_deps(self):
    # Returns a copy so callers cannot mutate the stored dict.
    return self._custom_deps.copy()

  @property
  def custom_hooks(self):
    # Returns a shallow copy of the list.
    return self._custom_hooks[:]

  @property
  def url(self):
    """URL after variable expansion."""
    return self._url

  @property
  def condition(self):
    """The condition string from the DEPS entry, or None."""
    return self._condition

  @property
  def target_os(self):
    # Union of this DEPS file's target_os with the parent chain's.
    if self.local_target_os is not None:
      return tuple(set(self.local_target_os).union(self.parent.target_os))
    else:
      return self.parent.target_os

  @property
  def target_cpu(self):
    # target_cpu is only defined at the root; delegate up the chain.
    return self.parent.target_cpu

  def set_url(self, url):
    self._url = url

  def get_custom_deps(self, name, url):
    """Returns a custom deps if applicable."""
    if self.parent:
      url = self.parent.get_custom_deps(name, url)
    # None is a valid return value to disable a dependency.
    return self.custom_deps.get(name, url)
class Dependency(gclient_utils.WorkItem, DependencySettings):
"""Object that represents a dependency checkout."""
  def __init__(self, parent, name, url, managed, custom_deps,
               custom_vars, custom_hooks, deps_file, should_process,
               should_recurse, relative, condition, print_outbuf=False):
    """Initializes a dependency checkout node (see DependencySettings)."""
    gclient_utils.WorkItem.__init__(self, name)
    DependencySettings.__init__(
        self, parent, url, managed, custom_deps, custom_vars,
        custom_hooks, deps_file, should_process, relative, condition)

    # This is in both .gclient and DEPS files:
    self._deps_hooks = []

    self._pre_deps_hooks = []

    # Calculates properties:
    self._dependencies = []
    self._vars = {}

    # A cache of the files affected by the current operation, necessary for
    # hooks.
    self._file_list = []
    # List of host names from which dependencies are allowed.
    # Default is an empty set, meaning unspecified in DEPS file, and hence all
    # hosts will be allowed. Non-empty set means whitelist of hosts.
    # allowed_hosts var is scoped to its DEPS file, and so it isn't recursive.
    self._allowed_hosts = frozenset()
    self._gn_args_from = None
    # Spec for .gni output to write (if any).
    self._gn_args_file = None
    self._gn_args = []
    # If it is not set to True, the dependency wasn't processed for its child
    # dependency, i.e. its DEPS wasn't read.
    self._deps_parsed = False
    # This dependency has been processed, i.e. checked out
    self._processed = False
    # This dependency had its pre-DEPS hooks run
    self._pre_deps_hooks_ran = False
    # This dependency had its hook run
    self._hooks_ran = False
    # This is the scm used to checkout self.url. It may be used by dependencies
    # to get the datetime of the revision we checked out.
    self._used_scm = None
    self._used_revision = None
    # The actual revision we ended up getting, or None if that information is
    # unavailable
    self._got_revision = None
    # recursedeps is a mutable value that selectively overrides the default
    # 'no recursion' setting on a dep-by-dep basis.
    #
    # It will be a dictionary of {deps_name: depfile_name}
    self.recursedeps = {}
    # Whether we should process this dependency's DEPS file.
    self._should_recurse = should_recurse

    self._OverrideUrl()
    # This is inherited from WorkItem. We want the URL to be a resource.
    if self.url and isinstance(self.url, basestring):
      # The url is usually given to gclient either as https://blah@123
      # or just https://blah. The @123 portion is irrelevant.
      self.resources.append(self.url.split('@')[0])

    # Controls whether we want to print git's output when we first clone the
    # dependency
    self.print_outbuf = print_outbuf

    if not self.name and self.parent:
      raise gclient_utils.Error('Dependency without name')
  def _OverrideUrl(self):
    """Resolves the parsed url from the parent hierarchy.

    Applies custom_deps overrides first; otherwise resolves relative URLs
    (those without a scheme) against the parent's URL.
    """
    parsed_url = self.get_custom_deps(self._name, self.url)
    if parsed_url != self.url:
      # A custom_deps entry (possibly None) replaces the DEPS-declared url.
      logging.info('Dependency(%s)._OverrideUrl(%s) -> %s', self._name,
                   self.url, parsed_url)
      self.set_url(parsed_url)

    elif isinstance(self.url, basestring):
      parsed_url = urlparse.urlparse(self.url)
      # No scheme and not an scp-like "user@host:path" spec => relative url.
      if (not parsed_url[0] and
          not re.match(r'^\w+\@[\w\.-]+\:[\w\/]+', parsed_url[2])):
        path = parsed_url[2]
        if not path.startswith('/'):
          raise gclient_utils.Error(
              'relative DEPS entry \'%s\' must begin with a slash' % self.url)
        # A relative url. Get the parent url, strip from the last '/'
        # (equivalent to unix basename), and append the relative url.
        parent_url = self.parent.url
        parsed_url = parent_url[:parent_url.rfind('/')] + self.url
        logging.info('Dependency(%s)._OverrideUrl(%s) -> %s', self.name,
                     self.url, parsed_url)
        self.set_url(parsed_url)

    elif self.url is None:
      logging.info('Dependency(%s)._OverrideUrl(None) -> None', self._name)

    else:
      raise gclient_utils.Error('Unknown url type')
  def PinToActualRevision(self):
    """Updates self.url to the revision checked out on disk.

    No-op when the dependency is unpinned (url is None). NOTE(review): when
    the checkout directory does not exist, this sets the url to None.
    """
    if self.url is None:
      return
    url = None
    scm = self.CreateSCM()
    if os.path.isdir(scm.checkout_path):
      revision = scm.revinfo(None, None, None)
      # Re-append the concrete revision to the bare origin url.
      url = '%s@%s' % (gclient_utils.SplitUrlRevision(self.url)[0], revision)
    self.set_url(url)
def ToLines(self):
s = []
condition_part = ([' "condition": %r,' % self.condition]
if self.condition else [])
s.extend([
' # %s' % self.hierarchy(include_url=False),
' "%s": {' % (self.name,),
' "url": "%s",' % (self.url,),
] + condition_part + [
' },',
'',
])
return s
  @property
  def requirements(self):
    """Calculate the list of requirements (names this dep must wait for)."""
    requirements = set()
    # self.parent is implicitly a requirement. This will be recursive by
    # definition.
    if self.parent and self.parent.name:
      requirements.add(self.parent.name)

    # For a tree with at least 2 levels*, the leaf node needs to depend
    # on the level higher up in an orderly way.
    # This becomes messy for >2 depth as the DEPS file format is a dictionary,
    # thus unsorted, while the .gclient format is a list thus sorted.
    #
    # Interestingly enough, the following condition only works in the case we
    # want: self is a 2nd level node. 3rd level nodes wouldn't need this since
    # they already have their parent as a requirement.
    if self.parent and self.parent.parent and not self.parent.parent.parent:
      requirements |= set(i.name for i in self.root.dependencies if i.name)

    # Any dependency whose path is a prefix of ours must be checked out first.
    if self.name:
      requirements |= set(
          obj.name for obj in self.root.subtree(False)
          if (obj is not self
              and obj.name and
              self.name.startswith(posixpath.join(obj.name, ''))))
    requirements = tuple(sorted(requirements))
    logging.info('Dependency(%s).requirements = %s' % (self.name, requirements))
    return requirements
  @property
  def should_recurse(self):
    """Whether this dependency's own DEPS file should be processed."""
    return self._should_recurse
def verify_validity(self):
"""Verifies that this Dependency is fine to add as a child of another one.
Returns True if this entry should be added, False if it is a duplicate of
another entry.
"""
logging.info('Dependency(%s).verify_validity()' % self.name)
if self.name in [s.name for s in self.parent.dependencies]:
raise gclient_utils.Error(
'The same name "%s" appears multiple times in the deps section' %
self.name)
if not self.should_process:
# Return early, no need to set requirements.
return not any(d.name == self.name for d in self.root.subtree(True))
# This require a full tree traversal with locks.
siblings = [d for d in self.root.subtree(False) if d.name == self.name]
for sibling in siblings:
# Allow to have only one to be None or ''.
if self.url != sibling.url and bool(self.url) == bool(sibling.url):
raise gclient_utils.Error(
('Dependency %s specified more than once:\n'
' %s [%s]\n'
'vs\n'
' %s [%s]') % (
self.name,
sibling.hierarchy(),
sibling.url,
self.hierarchy(),
self.url))
# In theory we could keep it as a shadow of the other one. In
# practice, simply ignore it.
logging.warn('Won\'t process duplicate dependency %s' % sibling)
return False
return True
  def _postprocess_deps(self, deps, rel_prefix):
    """Performs post-processing of deps compared to what's in the DEPS file.

    Merges .gclient custom_deps, propagates this dependency's condition to
    its children, and rewrites paths when rel_prefix is set.
    """
    # Make sure the dict is mutable, e.g. in case it's frozen.
    deps = dict(deps)

    # If a line is in custom_deps, but not in the solution, we want to append
    # this line to the solution.
    for dep_name, dep_info in self.custom_deps.iteritems():
      if dep_name not in deps:
        deps[dep_name] = {'url': dep_info, 'dep_type': 'git'}

    # Make child deps conditional on any parent conditions. This ensures that,
    # when flattened, recursed entries have the correct restrictions, even if
    # not explicitly set in the recursed DEPS file. For instance, if
    # "src/ios_foo" is conditional on "checkout_ios=True", then anything
    # recursively included by "src/ios_foo/DEPS" should also require
    # "checkout_ios=True".
    if self.condition:
      for value in deps.itervalues():
        gclient_eval.UpdateCondition(value, 'and', self.condition)

    if rel_prefix:
      logging.warning('use_relative_paths enabled.')
      rel_deps = {}
      for d, url in deps.items():
        # normpath is required to allow DEPS to use .. in their
        # dependency local path.
        rel_deps[os.path.normpath(os.path.join(rel_prefix, d))] = url
      logging.warning('Updating deps by prepending %s.', rel_prefix)
      deps = rel_deps

    return deps
  def _deps_to_objects(self, deps, use_relative_paths):
    """Converts a deps dict to a sorted list of Dependency objects."""
    deps_to_add = []
    for name, dep_value in deps.iteritems():
      should_process = self.should_process
      # A None value disables the dependency (e.g. via custom_deps).
      if dep_value is None:
        continue

      condition = dep_value.get('condition')
      dep_type = dep_value.get('dep_type')

      # Unless told to process everything, skip deps whose condition is false.
      if condition and not self._get_option('process_all_deps', False):
        should_process = should_process and gclient_eval.EvaluateCondition(
            condition, self.get_vars())

      # The following option is only set by the 'revinfo' command.
      if self._get_option('ignore_dep_type', None) == dep_type:
        continue

      if dep_type == 'cipd':
        # A CIPD entry expands to one dependency per declared package.
        cipd_root = self.GetCipdRoot()
        for package in dep_value.get('packages', []):
          deps_to_add.append(
              CipdDependency(
                  parent=self,
                  name=name,
                  dep_value=package,
                  cipd_root=cipd_root,
                  custom_vars=self.custom_vars,
                  should_process=should_process,
                  relative=use_relative_paths,
                  condition=condition))
      else:
        url = dep_value.get('url')
        deps_to_add.append(
            GitDependency(
                parent=self,
                name=name,
                url=url,
                managed=True,
                custom_deps=None,
                custom_vars=self.custom_vars,
                custom_hooks=None,
                deps_file=self.recursedeps.get(name, self.deps_file),
                should_process=should_process,
                should_recurse=name in self.recursedeps,
                relative=use_relative_paths,
                condition=condition))

    # Deterministic processing order regardless of dict iteration order.
    deps_to_add.sort(key=lambda x: x.name)
    return deps_to_add
  def ParseDepsFile(self):
    """Parses the DEPS file for this dependency.

    Reads the DEPS file, evaluates allowed_hosts/vars/gn_args settings,
    converts deps entries to child Dependency objects and registers hooks.
    Must only be called once per dependency.
    """
    assert not self.deps_parsed
    assert not self.dependencies

    deps_content = None

    # First try to locate the configured deps file. If it's missing, fallback
    # to DEPS.
    deps_files = [self.deps_file]
    if 'DEPS' not in deps_files:
      deps_files.append('DEPS')
    for deps_file in deps_files:
      filepath = os.path.join(self.root.root_dir, self.name, deps_file)
      if os.path.isfile(filepath):
        logging.info(
            'ParseDepsFile(%s): %s file found at %s', self.name, deps_file,
            filepath)
        break
      logging.info(
          'ParseDepsFile(%s): No %s file found at %s', self.name, deps_file,
          filepath)

    if os.path.isfile(filepath):
      deps_content = gclient_utils.FileRead(filepath)
      logging.debug('ParseDepsFile(%s) read:\n%s', self.name, deps_content)

    local_scope = {}
    if deps_content:
      try:
        local_scope = gclient_eval.Parse(
            deps_content, self._get_option('validate_syntax', False),
            filepath, self.get_vars(), self.get_builtin_vars())
      except SyntaxError as e:
        gclient_utils.SyntaxErrorToError(filepath, e)

    if 'allowed_hosts' in local_scope:
      try:
        self._allowed_hosts = frozenset(local_scope.get('allowed_hosts'))
      except TypeError:  # raised if non-iterable
        pass
      if not self._allowed_hosts:
        logging.warning("allowed_hosts is specified but empty %s",
                        self._allowed_hosts)
        raise gclient_utils.Error(
            'ParseDepsFile(%s): allowed_hosts must be absent '
            'or a non-empty iterable' % self.name)

    self._gn_args_from = local_scope.get('gclient_gn_args_from')
    self._gn_args_file = local_scope.get('gclient_gn_args_file')
    self._gn_args = local_scope.get('gclient_gn_args', [])
    # It doesn't make sense to set all of these, since setting gn_args_from to
    # another DEPS will make gclient ignore any other local gn_args* settings.
    assert not (self._gn_args_from and self._gn_args_file), \
        'Only specify one of "gclient_gn_args_from" or ' \
        '"gclient_gn_args_file + gclient_gn_args".'

    self._vars = local_scope.get('vars', {})
    if self.parent:
      # Parent vars (e.g. from .gclient custom_vars) override local vars.
      for key, value in self.parent.get_vars().iteritems():
        if key in self._vars:
          self._vars[key] = value
    # Since we heavily post-process things, freeze ones which should
    # reflect original state of DEPS.
    self._vars = gclient_utils.freeze(self._vars)

    # If use_relative_paths is set in the DEPS file, regenerate
    # the dictionary using paths relative to the directory containing
    # the DEPS file. Also update recursedeps if use_relative_paths is
    # enabled.
    # If the deps file doesn't set use_relative_paths, but the parent did
    # (and therefore set self.relative on this Dependency object), then we
    # want to modify the deps and recursedeps by prepending the parent
    # directory of this dependency.
    use_relative_paths = local_scope.get('use_relative_paths', False)
    rel_prefix = None
    if use_relative_paths:
      rel_prefix = self.name
    elif self._relative:
      rel_prefix = os.path.dirname(self.name)

    if 'recursion' in local_scope:
      # Legacy 'recursion' setting is no longer honored.
      logging.warning(
          '%s: Ignoring recursion = %d.', self.name, local_scope['recursion'])

    if 'recursedeps' in local_scope:
      for ent in local_scope['recursedeps']:
        if isinstance(ent, basestring):
          self.recursedeps[ent] = self.deps_file
        else:  # (depname, depsfilename)
          self.recursedeps[ent[0]] = ent[1]
      logging.warning('Found recursedeps %r.', repr(self.recursedeps))

      if rel_prefix:
        logging.warning('Updating recursedeps by prepending %s.', rel_prefix)
        rel_deps = {}
        for depname, options in self.recursedeps.iteritems():
          rel_deps[
              os.path.normpath(os.path.join(rel_prefix, depname))] = options
        self.recursedeps = rel_deps
    # To get gn_args from another DEPS, that DEPS must be recursed into.
    if self._gn_args_from:
      assert self.recursedeps and self._gn_args_from in self.recursedeps, \
          'The "gclient_gn_args_from" value must be in recursedeps.'

    # If present, save 'target_os' in the local_target_os property.
    if 'target_os' in local_scope:
      self.local_target_os = local_scope['target_os']

    deps = local_scope.get('deps', {})
    deps_to_add = self._deps_to_objects(
        self._postprocess_deps(deps, rel_prefix), use_relative_paths)

    # compute which working directory should be used for hooks
    use_relative_hooks = local_scope.get('use_relative_hooks', False)
    hooks_cwd = self.root.root_dir
    if use_relative_hooks:
      if not use_relative_paths:
        raise gclient_utils.Error(
            'ParseDepsFile(%s): use_relative_hooks must be used with '
            'use_relative_paths' % self.name)
      hooks_cwd = os.path.join(hooks_cwd, self.name)
      logging.warning('Updating hook base working directory to %s.',
                      hooks_cwd)

    # override named sets of hooks by the custom hooks
    hooks_to_run = []
    hook_names_to_suppress = [c.get('name', '') for c in self.custom_hooks]
    for hook in local_scope.get('hooks', []):
      if hook.get('name', '') not in hook_names_to_suppress:
        hooks_to_run.append(hook)

    # add the replacements and any additions
    for hook in self.custom_hooks:
      if 'action' in hook:
        hooks_to_run.append(hook)

    if self.should_recurse:
      self._pre_deps_hooks = [
          Hook.from_dict(hook, variables=self.get_vars(), verbose=True,
                         conditions=self.condition, cwd_base=hooks_cwd)
          for hook in local_scope.get('pre_deps_hooks', [])
      ]

    self.add_dependencies_and_close(deps_to_add, hooks_to_run,
                                    hooks_cwd=hooks_cwd)
    logging.info('ParseDepsFile(%s) done' % self.name)
def _get_option(self, attr, default):
obj = self
while not hasattr(obj, '_options'):
obj = obj.parent
return getattr(obj._options, attr, default)
def add_dependencies_and_close(self, deps_to_add, hooks, hooks_cwd=None):
"""Adds the dependencies, hooks and mark the parsing as done."""
if hooks_cwd == None:
hooks_cwd = self.root.root_dir
for dep in deps_to_add:
if dep.verify_validity():
self.add_dependency(dep)
self._mark_as_parsed([
Hook.from_dict(
h, variables=self.get_vars(), verbose=self.root._options.verbose,
conditions=self.condition, cwd_base=hooks_cwd)
for h in hooks
])
  def findDepsFromNotAllowedHosts(self):
    """Returns a list of dependencies from not allowed hosts.

    If allowed_hosts is not set, allows all hosts and returns empty list.
    """
    if not self._allowed_hosts:
      return []
    bad_deps = []
    for dep in self._dependencies:
      # Don't enforce this for custom_deps.
      if dep.name in self._custom_deps:
        continue
      if isinstance(dep.url, basestring):
        parsed_url = urlparse.urlparse(dep.url)
        # Deps without a netloc (e.g. local paths) are not checked.
        if parsed_url.netloc and parsed_url.netloc not in self._allowed_hosts:
          bad_deps.append(dep)
    return bad_deps
def FuzzyMatchUrl(self, candidates):
"""Attempts to find this dependency in the list of candidates.
It looks first for the URL of this dependency in the list of
candidates. If it doesn't succeed, and the URL ends in '.git', it will try
looking for the URL minus '.git'. Finally it will try to look for the name
of the dependency.
Args:
candidates: list, dict. The list of candidates in which to look for this
dependency. It can contain URLs as above, or dependency names like
"src/some/dep".
Returns:
If this dependency is not found in the list of candidates, returns None.
Otherwise, it returns under which name did we find this dependency:
- Its parsed url: "https://example.com/src.git'
- Its parsed url minus '.git': "https://example.com/src"
- Its name: "src"
"""
if self.url:
origin, _ = gclient_utils.SplitUrlRevision(self.url)
if origin in candidates:
return origin
if origin.endswith('.git') and origin[:-len('.git')] in candidates:
return origin[:-len('.git')]
if origin + '.git' in candidates:
return origin + '.git'
if self.name in candidates:
return self.name
return None
  # Arguments number differs from overridden method
  # pylint: disable=arguments-differ
  def run(self, revision_overrides, command, args, work_queue, options,
          patch_refs, target_branches):
    """Runs |command| then parse the DEPS file.

    Invokes the SCM for commands that need it, applies any matching patch
    ref, normalizes the affected-file list, then recurses into child
    dependencies via |work_queue|.
    """
    logging.info('Dependency(%s).run()' % self.name)
    assert self._file_list == []
    if not self.should_process:
      return
    # When running runhooks, there's no need to consult the SCM.
    # All known hooks are expected to run unconditionally regardless of working
    # copy state, so skip the SCM status check.
    run_scm = command not in (
        'flatten', 'runhooks', 'recurse', 'validate', None)
    # Only collect affected files when hooks will run (hooks consume them).
    file_list = [] if not options.nohooks else None
    revision_override = revision_overrides.pop(
        self.FuzzyMatchUrl(revision_overrides), None)
    if not revision_override and not self.managed:
      revision_override = 'unmanaged'
    if run_scm and self.url:
      # Create a shallow copy to mutate revision.
      options = copy.copy(options)
      options.revision = revision_override
      self._used_revision = options.revision
      self._used_scm = self.CreateSCM(out_cb=work_queue.out_cb)
      self._got_revision = self._used_scm.RunCommand(command, options, args,
                                                     file_list)

      patch_repo = self.url.split('@')[0]
      patch_ref = patch_refs.pop(self.FuzzyMatchUrl(patch_refs), None)
      target_branch = target_branches.pop(
          self.FuzzyMatchUrl(target_branches), None)
      if command == 'update' and patch_ref is not None:
        self._used_scm.apply_patch_ref(patch_repo, patch_ref, target_branch,
                                       options, file_list)

      if file_list:
        file_list = [os.path.join(self.name, f.strip()) for f in file_list]

      # TODO(phajdan.jr): We should know exactly when the paths are absolute.
      # Convert all absolute paths to relative.
      for i in range(len(file_list or [])):
        # It depends on the command being executed (like runhooks vs sync).
        if not os.path.isabs(file_list[i]):
          continue
        prefix = os.path.commonprefix(
            [self.root.root_dir.lower(), file_list[i].lower()])
        file_list[i] = file_list[i][len(prefix):]
        # Strip any leading path separators.
        while file_list[i].startswith(('\\', '/')):
          file_list[i] = file_list[i][1:]

    if self.should_recurse:
      self.ParseDepsFile()

    self._run_is_done(file_list or [])

    if self.should_recurse:
      if command in ('update', 'revert') and not options.noprehooks:
        self.RunPreDepsHooks()
      # Parse the dependencies of this dependency.
      for s in self.dependencies:
        if s.should_process:
          work_queue.enqueue(s)

    if command == 'recurse':
      # Skip file only checkout.
      scm = self.GetScmName()
      if not options.scm or scm in options.scm:
        cwd = os.path.normpath(os.path.join(self.root.root_dir, self.name))
        # Pass in the SCM type as an env variable. Make sure we don't put
        # unicode strings in the environment.
        env = os.environ.copy()
        if scm:
          env['GCLIENT_SCM'] = str(scm)
        if self.url:
          env['GCLIENT_URL'] = str(self.url)
        env['GCLIENT_DEP_PATH'] = str(self.name)
        if options.prepend_dir and scm == 'git':
          print_stdout = False
          def filter_fn(line):
            """Git-specific path marshaling. It is optimized for git-grep."""
            def mod_path(git_pathspec):
              match = re.match('^(\\S+?:)?([^\0]+)$', git_pathspec)
              modified_path = os.path.join(self.name, match.group(2))
              branch = match.group(1) or ''
              return '%s%s' % (branch, modified_path)
            match = re.match('^Binary file ([^\0]+) matches$', line)
            if match:
              print('Binary file %s matches\n' % mod_path(match.group(1)))
              return
            items = line.split('\0')
            if len(items) == 2 and items[1]:
              print('%s : %s' % (mod_path(items[0]), items[1]))
            elif len(items) >= 2:
              # Multiple null bytes or a single trailing null byte indicate
              # git is likely displaying filenames only (such as with -l)
              print('\n'.join(mod_path(path) for path in items if path))
            else:
              print(line)
        else:
          print_stdout = True
          filter_fn = None

        if self.url is None:
          print('Skipped omitted dependency %s' % cwd, file=sys.stderr)
        elif os.path.isdir(cwd):
          try:
            gclient_utils.CheckCallAndFilter(
                args, cwd=cwd, env=env, print_stdout=print_stdout,
                filter_fn=filter_fn,
            )
          except subprocess2.CalledProcessError:
            if not options.ignore:
              raise
        else:
          print('Skipped missing %s' % cwd, file=sys.stderr)
  def GetScmName(self):
    """Returns the name of the SCM kind used for this dependency (e.g. 'git').

    Abstract; concrete subclasses (e.g. GitDependency, CipdDependency)
    must override.
    """
    raise NotImplementedError()
  def CreateSCM(self, out_cb=None):
    """Creates the SCM wrapper instance that manages this checkout.

    Abstract; concrete subclasses must override.
    """
    raise NotImplementedError()
def HasGNArgsFile(self):
return self._gn_args_file is not None
def WriteGNArgsFile(self):
lines = ['# Generated from %r' % self.deps_file]
variables = self.get_vars()
for arg in self._gn_args:
value = variables[arg]
if isinstance(value, basestring):
value = gclient_eval.EvaluateCondition(value, variables)
lines.append('%s = %s' % (arg, ToGNString(value)))
with open(os.path.join(self.root.root_dir, self._gn_args_file), 'w') as f:
f.write('\n'.join(lines))
@gclient_utils.lockedmethod
def _run_is_done(self, file_list):
# Both these are kept for hooks that are run as a separate tree traversal.
self._file_list = file_list
self._processed = True
def GetHooks(self, options):
"""Evaluates all hooks, and return them in a flat list.
RunOnDeps() must have been called before to load the DEPS.
"""
result = []
if not self.should_process or not self.should_recurse:
# Don't run the hook when it is above recursion_limit.
return result
# If "--force" was specified, run all hooks regardless of what files have
# changed.
if self.deps_hooks:
# TODO(maruel): If the user is using git, then we don't know
# what files have changed so we always run all hooks. It'd be nice to fix
# that.
result.extend(self.deps_hooks)
for s in self.dependencies:
result.extend(s.GetHooks(options))
return result
def RunHooksRecursively(self, options, progress):
assert self.hooks_ran == False
self._hooks_ran = True
hooks = self.GetHooks(options)
if progress:
progress._total = len(hooks)
for hook in hooks:
if progress:
progress.update(extra=hook.name or '')
hook.run()
if progress:
progress.end()
def RunPreDepsHooks(self):
assert self.processed
assert self.deps_parsed
assert not self.pre_deps_hooks_ran
assert not self.hooks_ran
for s in self.dependencies:
assert not s.processed
self._pre_deps_hooks_ran = True
for hook in self.pre_deps_hooks:
hook.run()
def GetCipdRoot(self):
if self.root is self:
# Let's not infinitely recurse. If this is root and isn't an
# instance of GClient, do nothing.
return None
return self.root.GetCipdRoot()
def subtree(self, include_all):
"""Breadth first recursion excluding root node."""
dependencies = self.dependencies
for d in dependencies:
if d.should_process or include_all:
yield d
for d in dependencies:
for i in d.subtree(include_all):
yield i
  @gclient_utils.lockedmethod
  def add_dependency(self, new_dep):
    """Appends |new_dep| to this dependency's children (under the lock)."""
    self._dependencies.append(new_dep)
  @gclient_utils.lockedmethod
  def _mark_as_parsed(self, new_hooks):
    """Registers hooks found while parsing DEPS and flags parsing as done."""
    self._deps_hooks.extend(new_hooks)
    self._deps_parsed = True
  @property
  @gclient_utils.lockedmethod
  def dependencies(self):
    """Immutable snapshot of this dependency's children."""
    return tuple(self._dependencies)
  @property
  @gclient_utils.lockedmethod
  def deps_hooks(self):
    """Immutable snapshot of the hooks collected from this dep's DEPS."""
    return tuple(self._deps_hooks)
  @property
  @gclient_utils.lockedmethod
  def pre_deps_hooks(self):
    """Immutable snapshot of the pre-deps hooks for this dependency."""
    return tuple(self._pre_deps_hooks)
  @property
  @gclient_utils.lockedmethod
  def deps_parsed(self):
    """Whether the DEPS file has been parsed (set by _mark_as_parsed()).

    This is purely for debugging purposes. It's not used anywhere.
    """
    return self._deps_parsed
  @property
  @gclient_utils.lockedmethod
  def processed(self):
    """Whether _run_is_done() has recorded a completed run for this dep."""
    return self._processed
  @property
  @gclient_utils.lockedmethod
  def pre_deps_hooks_ran(self):
    """Whether RunPreDepsHooks() has already executed."""
    return self._pre_deps_hooks_ran
  @property
  @gclient_utils.lockedmethod
  def hooks_ran(self):
    """Whether RunHooksRecursively() has already executed."""
    return self._hooks_ran
  @property
  @gclient_utils.lockedmethod
  def allowed_hosts(self):
    """The allowed_hosts value recorded for this dependency."""
    return self._allowed_hosts
  @property
  @gclient_utils.lockedmethod
  def file_list(self):
    """Files recorded by the last run (see _run_is_done()), as a tuple."""
    return tuple(self._file_list)
  @property
  def used_scm(self):
    """SCMWrapper instance for this dependency or None if not processed yet."""
    # Note: unlike the neighboring properties, this one is not lock-guarded.
    return self._used_scm
  @property
  @gclient_utils.lockedmethod
  def got_revision(self):
    """Revision reported by the SCM command during run(), if any."""
    return self._got_revision
@property
def file_list_and_children(self):
result = list(self.file_list)
for d in self.dependencies:
result.extend(d.file_list_and_children)
return tuple(result)
def __str__(self):
out = []
for i in ('name', 'url', 'custom_deps',
'custom_vars', 'deps_hooks', 'file_list', 'should_process',
'processed', 'hooks_ran', 'deps_parsed', 'requirements',
'allowed_hosts'):
# First try the native property if it exists.
if hasattr(self, '_' + i):
value = getattr(self, '_' + i, False)
else:
value = getattr(self, i, False)
if value:
out.append('%s: %s' % (i, value))
for d in self.dependencies:
out.extend([' ' + x for x in str(d).splitlines()])
out.append('')
return '\n'.join(out)
def __repr__(self):
return '%s: %s' % (self.name, self.url)
def hierarchy(self, include_url=True):
"""Returns a human-readable hierarchical reference to a Dependency."""
def format_name(d):
if include_url:
return '%s(%s)' % (d.name, d.url)
return d.name
out = format_name(self)
i = self.parent
while i and i.name:
out = '%s -> %s' % (format_name(i), out)
i = i.parent
return out
def hierarchy_data(self):
"""Returns a machine-readable hierarchical reference to a Dependency."""
d = self
out = []
while d and d.name:
out.insert(0, (d.name, d.url))
d = d.parent
return tuple(out)
def get_builtin_vars(self):
return {
'checkout_android': 'android' in self.target_os,
'checkout_chromeos': 'chromeos' in self.target_os,
'checkout_fuchsia': 'fuchsia' in self.target_os,
'checkout_ios': 'ios' in self.target_os,
'checkout_linux': 'unix' in self.target_os,
'checkout_mac': 'mac' in self.target_os,
'checkout_win': 'win' in self.target_os,
'host_os': _detect_host_os(),
'checkout_arm': 'arm' in self.target_cpu,
'checkout_arm64': 'arm64' in self.target_cpu,
'checkout_x86': 'x86' in self.target_cpu,
'checkout_mips': 'mips' in self.target_cpu,
'checkout_mips64': 'mips64' in self.target_cpu,
'checkout_ppc': 'ppc' in self.target_cpu,
'checkout_s390': 's390' in self.target_cpu,
'checkout_x64': 'x64' in self.target_cpu,
'host_cpu': detect_host_arch.HostArch(),
}
def get_vars(self):
"""Returns a dictionary of effective variable values
(DEPS file contents with applied custom_vars overrides)."""
# Variable precedence (last has highest):
# - DEPS vars
# - parents, from first to last
# - built-in
# - custom_vars overrides
result = {}
result.update(self._vars)
if self.parent:
parent_vars = self.parent.get_vars()
result.update(parent_vars)
# Provide some built-in variables.
result.update(self.get_builtin_vars())
result.update(self.custom_vars or {})
return result
_PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
'aix6': 'aix',
}
def _detect_host_os():
  """Maps sys.platform to the gclient host_os name (raises KeyError if
  the platform is not listed in _PLATFORM_MAPPING)."""
  return _PLATFORM_MAPPING[sys.platform]
class GitDependency(Dependency):
  """A Dependency object that represents a single git checkout."""

  #override
  def GetScmName(self):
    """Always 'git'."""
    return 'git'

  #override
  def CreateSCM(self, out_cb=None):
    """Create a Wrapper instance suitable for handling this git dependency."""
    return gclient_scm.GitWrapper(
        self.url, self.root.root_dir, self.name, self.outbuf, out_cb,
        print_outbuf=self.print_outbuf)
class GClient(GitDependency):
  """Object that represent a gclient checkout. A tree of Dependency(), one per
  solution or DEPS entry."""

  # Maps known sys.platform values (and user-friendly aliases) to the
  # canonical deps_os keys used in DEPS files.
  DEPS_OS_CHOICES = {
    "aix6": "unix",
    "win32": "win",
    "win": "win",
    "cygwin": "win",
    "darwin": "mac",
    "mac": "mac",
    "unix": "unix",
    "linux": "unix",
    "linux2": "unix",
    "linux3": "unix",
    "android": "android",
    "ios": "ios",
    "fuchsia": "fuchsia",
  }

  # Template used by SetDefaultConfig() to produce a fresh .gclient file.
  DEFAULT_CLIENT_FILE_TEXT = ("""\
solutions = [
  { "name"        : "%(solution_name)s",
    "url"         : "%(solution_url)s",
    "deps_file"   : "%(deps_file)s",
    "managed"     : %(managed)s,
    "custom_deps" : {
    },
    "custom_vars": %(custom_vars)r,
  },
]
""")

  # Appended to DEFAULT_CLIENT_FILE_TEXT when a cache_dir is requested.
  DEFAULT_CLIENT_CACHE_DIR_TEXT = ("""\
cache_dir = %(cache_dir)r
""")

  # Template used by PrintRevInfo() when --snapshot is requested.
  DEFAULT_SNAPSHOT_FILE_TEXT = ("""\
# Snapshot generated with gclient revinfo --snapshot
solutions = %(solution_list)s
""")

  def __init__(self, root_dir, options):
    # Do not change previous behavior. Only solution level and immediate DEPS
    # are processed.
    self._recursion_limit = 2
    super(GClient, self).__init__(
        parent=None,
        name=None,
        url=None,
        managed=True,
        custom_deps=None,
        custom_vars=None,
        custom_hooks=None,
        deps_file='unused',
        should_process=True,
        should_recurse=True,
        relative=None,
        condition=None,
        print_outbuf=True)

    self._options = options
    if options.deps_os:
      enforced_os = options.deps_os.split(',')
    else:
      enforced_os = [self.DEPS_OS_CHOICES.get(sys.platform, 'unix')]
    if 'all' in enforced_os:
      enforced_os = self.DEPS_OS_CHOICES.itervalues()
    self._enforced_os = tuple(set(enforced_os))
    # NOTE: the trailing comma is deliberate -- _enforced_cpu is a 1-tuple.
    self._enforced_cpu = detect_host_arch.HostArch(),
    self._root_dir = root_dir
    self._cipd_root = None
    self.config_content = None

  def _CheckConfig(self):
    """Verify that the config matches the state of the existing checked-out
    solutions."""
    for dep in self.dependencies:
      if dep.managed and dep.url:
        scm = dep.CreateSCM()
        actual_url = scm.GetActualRemoteURL(self._options)
        if actual_url and not scm.DoesRemoteURLMatch(self._options):
          mirror = scm.GetCacheMirror()
          if mirror:
            mirror_string = '%s (exists=%s)' % (mirror.mirror_path,
                                                mirror.exists())
          else:
            mirror_string = 'not used'
          raise gclient_utils.Error('''
Your .gclient file seems to be broken. The requested URL is different from what
is actually checked out in %(checkout_path)s.

The .gclient file contains:
URL: %(expected_url)s (%(expected_scm)s)
Cache mirror: %(mirror_string)s

The local checkout in %(checkout_path)s reports:
%(actual_url)s (%(actual_scm)s)

You should ensure that the URL listed in .gclient is correct and either change
it or fix the checkout.
''' % {'checkout_path': os.path.join(self.root_dir, dep.name),
       'expected_url': dep.url,
       'expected_scm': dep.GetScmName(),
       'mirror_string' : mirror_string,
       'actual_url': actual_url,
       'actual_scm': dep.GetScmName()})

  def SetConfig(self, content):
    """Parses a .gclient file's |content| and builds the solution deps."""
    assert not self.dependencies
    config_dict = {}
    self.config_content = content
    try:
      # A .gclient file is Python source; evaluate it in a scratch namespace.
      exec(content, config_dict)
    except SyntaxError as e:
      gclient_utils.SyntaxErrorToError('.gclient', e)

    # Append any target OS that is not already being enforced to the tuple.
    target_os = config_dict.get('target_os', [])
    if config_dict.get('target_os_only', False):
      self._enforced_os = tuple(set(target_os))
    else:
      self._enforced_os = tuple(set(self._enforced_os).union(target_os))

    # Append any target CPU that is not already being enforced to the tuple.
    target_cpu = config_dict.get('target_cpu', [])
    if config_dict.get('target_cpu_only', False):
      self._enforced_cpu = tuple(set(target_cpu))
    else:
      self._enforced_cpu = tuple(set(self._enforced_cpu).union(target_cpu))

    cache_dir = config_dict.get('cache_dir', UNSET_CACHE_DIR)
    if cache_dir is not UNSET_CACHE_DIR:
      if cache_dir:
        cache_dir = os.path.join(self.root_dir, cache_dir)
        cache_dir = os.path.abspath(cache_dir)
      git_cache.Mirror.SetCachePath(cache_dir)

    if not target_os and config_dict.get('target_os_only', False):
      raise gclient_utils.Error('Can\'t use target_os_only if target_os is '
                                'not specified')

    if not target_cpu and config_dict.get('target_cpu_only', False):
      raise gclient_utils.Error('Can\'t use target_cpu_only if target_cpu is '
                                'not specified')

    deps_to_add = []
    for s in config_dict.get('solutions', []):
      try:
        deps_to_add.append(GitDependency(
            parent=self,
            name=s['name'],
            url=s['url'],
            managed=s.get('managed', True),
            custom_deps=s.get('custom_deps', {}),
            custom_vars=s.get('custom_vars', {}),
            custom_hooks=s.get('custom_hooks', []),
            deps_file=s.get('deps_file', 'DEPS'),
            should_process=True,
            should_recurse=True,
            relative=None,
            condition=None,
            print_outbuf=True))
      except KeyError:
        raise gclient_utils.Error('Invalid .gclient file. Solution is '
                                  'incomplete: %s' % s)
    # Report known project URLs (with any trailing '.git' stripped).
    metrics.collector.add(
        'project_urls',
        [
            dep.url if not dep.url.endswith('.git') else dep.url[:-len('.git')]
            for dep in deps_to_add
            if dep.FuzzyMatchUrl(metrics_utils.KNOWN_PROJECT_URLS)
        ]
    )

    self.add_dependencies_and_close(deps_to_add, config_dict.get('hooks', []))
    logging.info('SetConfig() done')

  def SaveConfig(self):
    """Writes config_content back to the .gclient file."""
    gclient_utils.FileWrite(os.path.join(self.root_dir,
                                         self._options.config_filename),
                            self.config_content)

  @staticmethod
  def LoadCurrentConfig(options):
    """Searches for and loads a .gclient file relative to the current working
    dir. Returns a GClient object."""
    if options.spec:
      client = GClient('.', options)
      client.SetConfig(options.spec)
    else:
      if options.verbose:
        print('Looking for %s starting from %s\n' % (
            options.config_filename, os.getcwd()))
      path = gclient_utils.FindGclientRoot(os.getcwd(), options.config_filename)
      if not path:
        if options.verbose:
          print('Couldn\'t find configuration file.')
        return None
      client = GClient(path, options)
      client.SetConfig(gclient_utils.FileRead(
          os.path.join(path, options.config_filename)))

    # With multiple solutions, a bare --revision is ambiguous; warn loudly.
    if (options.revisions and
        len(client.dependencies) > 1 and
        any('@' not in r for r in options.revisions)):
      print(
          ('You must specify the full solution name like --revision %s@%s\n'
           'when you have multiple solutions setup in your .gclient file.\n'
           'Other solutions present are: %s.') % (
              client.dependencies[0].name,
              options.revisions[0],
              ', '.join(s.name for s in client.dependencies[1:])),
          file=sys.stderr)

    return client

  def SetDefaultConfig(self, solution_name, deps_file, solution_url,
                       managed=True, cache_dir=UNSET_CACHE_DIR,
                       custom_vars=None):
    """Fills config_content from the default single-solution template."""
    text = self.DEFAULT_CLIENT_FILE_TEXT
    format_dict = {
        'solution_name': solution_name,
        'solution_url': solution_url,
        'deps_file': deps_file,
        'managed': managed,
        'custom_vars': custom_vars or {},
    }

    if cache_dir is not UNSET_CACHE_DIR:
      text += self.DEFAULT_CLIENT_CACHE_DIR_TEXT
      format_dict['cache_dir'] = cache_dir

    self.SetConfig(text % format_dict)

  def _SaveEntries(self):
    """Creates a .gclient_entries file to record the list of unique checkouts.

    The .gclient_entries file lives in the same directory as .gclient.
    """
    # Sometimes pprint.pformat will use {', sometimes it'll use { ' ... It
    # makes testing a bit too fun.
    result = 'entries = {\n'
    for entry in self.root.subtree(False):
      result += '  %s: %s,\n' % (pprint.pformat(entry.name),
                                 pprint.pformat(entry.url))
    result += '}\n'
    file_path = os.path.join(self.root_dir, self._options.entries_filename)
    logging.debug(result)
    gclient_utils.FileWrite(file_path, result)

  def _ReadEntries(self):
    """Read the .gclient_entries file for the given client.

    Returns:
      A sequence of solution names, which will be empty if there is the
      entries file hasn't been created yet.
    """
    scope = {}
    filename = os.path.join(self.root_dir, self._options.entries_filename)
    if not os.path.exists(filename):
      return {}
    try:
      exec(gclient_utils.FileRead(filename), scope)
    except SyntaxError as e:
      gclient_utils.SyntaxErrorToError(filename, e)
    return scope.get('entries', {})

  def _EnforceRevisions(self):
    """Checks for revision overrides."""
    revision_overrides = {}
    if self._options.head:
      return revision_overrides
    if not self._options.revisions:
      return revision_overrides
    solutions_names = [s.name for s in self.dependencies]
    index = 0
    for revision in self._options.revisions:
      if not '@' in revision:
        # Support for --revision 123
        revision = '%s@%s' % (solutions_names[index], revision)
      name, rev = revision.split('@', 1)
      revision_overrides[name] = rev
      index += 1
    return revision_overrides

  def _EnforcePatchRefsAndBranches(self):
    """Checks for patch refs.

    Returns (patch_refs, target_branches) dicts keyed by patch repo.
    """
    patch_refs = {}
    target_branches = {}
    if not self._options.patch_refs:
      return patch_refs, target_branches
    for given_patch_ref in self._options.patch_refs:
      patch_repo, _, patch_ref = given_patch_ref.partition('@')
      if not patch_repo or not patch_ref:
        raise gclient_utils.Error(
            'Wrong revision format: %s should be of the form '
            'patch_repo@[target_branch:]patch_ref.' % given_patch_ref)
      # An optional 'target_branch:' prefix can precede the patch ref.
      if ':' in patch_ref:
        target_branch, _, patch_ref = patch_ref.partition(':')
        target_branches[patch_repo] = target_branch
      patch_refs[patch_repo] = patch_ref
    return patch_refs, target_branches

  def _RemoveUnversionedGitDirs(self):
    """Remove directories that are no longer part of the checkout.

    Notify the user if there is an orphaned entry in their working copy.
    Only delete the directory if there are no changes in it, and
    delete_unversioned_trees is set to true.
    """
    entries = [i.name for i in self.root.subtree(False) if i.url]
    full_entries = [os.path.join(self.root_dir, e.replace('/', os.path.sep))
                    for e in entries]

    for entry, prev_url in self._ReadEntries().iteritems():
      if not prev_url:
        # entry must have been overridden via .gclient custom_deps
        continue
      # Fix path separator on Windows.
      entry_fixed = entry.replace('/', os.path.sep)
      e_dir = os.path.join(self.root_dir, entry_fixed)
      # Use entry and not entry_fixed there.
      if (entry not in entries and
          (not any(path.startswith(entry + '/') for path in entries)) and
          os.path.exists(e_dir)):
        # The entry has been removed from DEPS.
        scm = gclient_scm.GitWrapper(
            prev_url, self.root_dir, entry_fixed, self.outbuf)

        # Check to see if this directory is now part of a higher-up checkout.
        scm_root = None
        try:
          scm_root = gclient_scm.scm.GIT.GetCheckoutRoot(scm.checkout_path)
        except subprocess2.CalledProcessError:
          pass
        if not scm_root:
          logging.warning('Could not find checkout root for %s. Unable to '
                          'determine whether it is part of a higher-level '
                          'checkout, so not removing.' % entry)
          continue

        # This is to handle the case of third_party/WebKit migrating from
        # being a DEPS entry to being part of the main project.
        # If the subproject is a Git project, we need to remove its .git
        # folder. Otherwise git operations on that folder will have different
        # effects depending on the current working directory.
        if os.path.abspath(scm_root) == os.path.abspath(e_dir):
          e_par_dir = os.path.join(e_dir, os.pardir)
          if gclient_scm.scm.GIT.IsInsideWorkTree(e_par_dir):
            par_scm_root = gclient_scm.scm.GIT.GetCheckoutRoot(e_par_dir)
            # rel_e_dir : relative path of entry w.r.t. its parent repo.
            rel_e_dir = os.path.relpath(e_dir, par_scm_root)
            if gclient_scm.scm.GIT.IsDirectoryVersioned(
                par_scm_root, rel_e_dir):
              save_dir = scm.GetGitBackupDirPath()
              # Remove any eventual stale backup dir for the same project.
              if os.path.exists(save_dir):
                gclient_utils.rmtree(save_dir)
              os.rename(os.path.join(e_dir, '.git'), save_dir)
              # When switching between the two states (entry/ is a subproject
              # -> entry/ is part of the outer project), it is very likely
              # that some files are changed in the checkout, unless we are
              # jumping *exactly* across the commit which changed just DEPS.
              # In such case we want to cleanup any eventual stale files
              # (coming from the old subproject) in order to end up with a
              # clean checkout.
              gclient_scm.scm.GIT.CleanupDir(par_scm_root, rel_e_dir)
              assert not os.path.exists(os.path.join(e_dir, '.git'))
              print(('\nWARNING: \'%s\' has been moved from DEPS to a higher '
                     'level checkout. The git folder containing all the local'
                     ' branches has been saved to %s.\n'
                     'If you don\'t care about its state you can safely '
                     'remove that folder to free up space.') %
                    (entry, save_dir))
              continue

        if scm_root in full_entries:
          logging.info('%s is part of a higher level checkout, not removing',
                       scm.GetCheckoutRoot())
          continue

        file_list = []
        scm.status(self._options, [], file_list)
        modified_files = file_list != []
        if (not self._options.delete_unversioned_trees or
            (modified_files and not self._options.force)):
          # There are modified files in this entry. Keep warning until
          # removed.
          print(('\nWARNING: \'%s\' is no longer part of this client. '
                 'It is recommended that you manually remove it.\n') %
                entry_fixed)
        else:
          # Delete the entry
          print('\n________ deleting \'%s\' in \'%s\'' % (
              entry_fixed, self.root_dir))
          gclient_utils.rmtree(e_dir)
    # record the current list of entries for next time
    self._SaveEntries()

  def RunOnDeps(self, command, args, ignore_requirements=False, progress=True):
    """Runs a command on each dependency in a client and its dependencies.

    Args:
      command: The command to use (e.g., 'status' or 'diff')
      args: list of str - extra arguments to add to the command line.
    """
    if not self.dependencies:
      raise gclient_utils.Error('No solution specified')

    revision_overrides = {}
    patch_refs = {}
    target_branches = {}
    # It's unnecessary to check for revision overrides for 'recurse'.
    # Save a few seconds by not calling _EnforceRevisions() in that case.
    if command not in ('diff', 'recurse', 'runhooks', 'status', 'revert',
                       'validate'):
      self._CheckConfig()
      revision_overrides = self._EnforceRevisions()

    if command == 'update':
      patch_refs, target_branches = self._EnforcePatchRefsAndBranches()
    # Disable progress for non-tty stdout.
    should_show_progress = (
        setup_color.IS_TTY and not self._options.verbose and progress)
    pm = None
    if should_show_progress:
      if command in ('update', 'revert'):
        pm = Progress('Syncing projects', 1)
      elif command in ('recurse', 'validate'):
        pm = Progress(' '.join(args), 1)
    work_queue = gclient_utils.ExecutionQueue(
        self._options.jobs, pm, ignore_requirements=ignore_requirements,
        verbose=self._options.verbose)
    for s in self.dependencies:
      if s.should_process:
        work_queue.enqueue(s)
    work_queue.flush(revision_overrides, command, args, options=self._options,
                     patch_refs=patch_refs, target_branches=target_branches)

    # Entries left in these dicts were never matched to a dependency.
    if revision_overrides:
      print('Please fix your script, having invalid --revision flags will soon '
            'be considered an error.', file=sys.stderr)

    if patch_refs:
      raise gclient_utils.Error(
          'The following --patch-ref flags were not used. Please fix it:\n%s' %
          ('\n'.join(
              patch_repo + '@' + patch_ref
              for patch_repo, patch_ref in patch_refs.iteritems())))

    # Once all the dependencies have been processed, it's now safe to write
    # out the gn_args_file and run the hooks.
    if command == 'update':
      gn_args_dep = self.dependencies[0]
      if gn_args_dep._gn_args_from:
        deps_map = dict([(dep.name, dep) for dep in gn_args_dep.dependencies])
        gn_args_dep = deps_map.get(gn_args_dep._gn_args_from)
      if gn_args_dep and gn_args_dep.HasGNArgsFile():
        gn_args_dep.WriteGNArgsFile()

      self._RemoveUnversionedGitDirs()

    # Sync CIPD dependencies once removed deps are deleted. In case a git
    # dependency was moved to CIPD, we want to remove the old git directory
    # first and then sync the CIPD dep.
    if self._cipd_root:
      self._cipd_root.run(command)

    if not self._options.nohooks:
      if should_show_progress:
        pm = Progress('Running hooks', 1)
      self.RunHooksRecursively(self._options, pm)

    return 0

  def PrintRevInfo(self):
    """Prints revision information for the solutions and their dependencies."""
    if not self.dependencies:
      raise gclient_utils.Error('No solution specified')

    # Load all the settings.
    work_queue = gclient_utils.ExecutionQueue(
        self._options.jobs, None, False, verbose=self._options.verbose)
    for s in self.dependencies:
      if s.should_process:
        work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=self._options, patch_refs=None,
                     target_branches=None)

    def ShouldPrintRevision(dep):
      return (not self._options.filter
              or dep.FuzzyMatchUrl(self._options.filter))

    if self._options.snapshot:
      json_output = []
      # First level at .gclient
      for d in self.dependencies:
        entries = {}
        def GrabDeps(dep):
          """Recursively grab dependencies."""
          for d in dep.dependencies:
            d.PinToActualRevision()
            if ShouldPrintRevision(d):
              entries[d.name] = d.url
            GrabDeps(d)
        GrabDeps(d)
        json_output.append({
            'name': d.name,
            'solution_url': d.url,
            'deps_file': d.deps_file,
            'managed': d.managed,
            'custom_deps': entries,
        })
      if self._options.output_json == '-':
        print(json.dumps(json_output, indent=2, separators=(',', ': ')))
      elif self._options.output_json:
        with open(self._options.output_json, 'w') as f:
          json.dump(json_output, f)
      else:
        # Print the snapshot configuration file
        print(self.DEFAULT_SNAPSHOT_FILE_TEXT % {
            'solution_list': pprint.pformat(json_output, indent=2),
        })
    else:
      entries = {}
      for d in self.root.subtree(False):
        if self._options.actual:
          d.PinToActualRevision()
        if ShouldPrintRevision(d):
          entries[d.name] = d.url
      if self._options.output_json:
        json_output = {
            name: {
                'url': rev.split('@')[0] if rev else None,
                'rev': rev.split('@')[1] if rev and '@' in rev else None,
            }
            for name, rev in entries.iteritems()
        }
        if self._options.output_json == '-':
          print(json.dumps(json_output, indent=2, separators=(',', ': ')))
        else:
          with open(self._options.output_json, 'w') as f:
            json.dump(json_output, f)
      else:
        keys = sorted(entries.keys())
        for x in keys:
          print('%s: %s' % (x, entries[x]))
    logging.info(str(self))

  def ParseDepsFile(self):
    """No DEPS to parse for a .gclient file."""
    raise gclient_utils.Error('Internal error')

  def PrintLocationAndContents(self):
    """Prints where the .gclient file lives and its full contents."""
    # Print out the .gclient file. This is longer than if we just printed the
    # client dict, but more legible, and it might contain helpful comments.
    print('Loaded .gclient config in %s:\n%s' % (
        self.root_dir, self.config_content))

  def GetCipdRoot(self):
    """Lazily creates and returns the CipdRoot shared by the whole checkout."""
    if not self._cipd_root:
      self._cipd_root = gclient_scm.CipdRoot(
          self.root_dir,
          # TODO(jbudorick): Support other service URLs as necessary.
          # Service URLs should be constant over the scope of a cipd
          # root, so a var per DEPS file specifying the service URL
          # should suffice.
          'https://chrome-infra-packages.appspot.com')
    return self._cipd_root

  @property
  def root_dir(self):
    """Root directory of gclient checkout."""
    return self._root_dir

  @property
  def enforced_os(self):
    """What deps_os entries that are to be parsed."""
    return self._enforced_os

  @property
  def target_os(self):
    """Alias of enforced_os (the Dependency-facing name)."""
    return self._enforced_os

  @property
  def target_cpu(self):
    """Tuple of CPU names set at construction and via SetConfig()."""
    return self._enforced_cpu
class CipdDependency(Dependency):
  """A Dependency object that represents a single CIPD package."""

  def __init__(
      self, parent, name, dep_value, cipd_root,
      custom_vars, should_process, relative, condition):
    package = dep_value['package']
    version = dep_value['version']
    # The synthetic URL is '<service_url>/<package>@<version>'.
    url = urlparse.urljoin(
        cipd_root.service_url, '%s@%s' % (package, version))
    super(CipdDependency, self).__init__(
        parent=parent,
        name=name + ':' + package,
        url=url,
        managed=None,
        custom_deps=None,
        custom_vars=custom_vars,
        custom_hooks=None,
        deps_file=None,
        should_process=should_process,
        should_recurse=False,
        relative=relative,
        condition=condition)
    if relative:
      # TODO(jbudorick): Implement relative if necessary.
      raise gclient_utils.Error(
          'Relative CIPD dependencies are not currently supported.')
    self._cipd_package = None
    self._cipd_root = cipd_root
    # CIPD wants /-separated paths, even on Windows.
    native_subdir_path = os.path.relpath(
        os.path.join(self.root.root_dir, name), cipd_root.root_dir)
    self._cipd_subdir = posixpath.join(*native_subdir_path.split(os.sep))
    self._package_name = package
    self._package_version = version

  #override
  def run(self, revision_overrides, command, args, work_queue, options,
          patch_refs, target_branches):
    """Runs |command| then parse the DEPS file."""
    logging.info('CipdDependency(%s).run()' % self.name)
    if not self.should_process:
      return
    self._CreatePackageIfNecessary()
    super(CipdDependency, self).run(revision_overrides, command, args,
                                    work_queue, options, patch_refs,
                                    target_branches)

  def _CreatePackageIfNecessary(self):
    # We lazily create the CIPD package to make sure that only packages
    # that we want (as opposed to all packages defined in all DEPS files
    # we parse) get added to the root and subsequently ensured.
    if not self._cipd_package:
      self._cipd_package = self._cipd_root.add_package(
          self._cipd_subdir, self._package_name, self._package_version)

  def ParseDepsFile(self):
    """CIPD dependencies are not currently allowed to have nested deps."""
    self.add_dependencies_and_close([], [])

  #override
  def verify_validity(self):
    """CIPD dependencies allow duplicate name for packages in same directory."""
    logging.info('Dependency(%s).verify_validity()' % self.name)
    return True

  #override
  def GetScmName(self):
    """Always 'cipd'."""
    return 'cipd'

  #override
  def CreateSCM(self, out_cb=None):
    """Create a Wrapper instance suitable for handling this CIPD dependency."""
    self._CreatePackageIfNecessary()
    return gclient_scm.CipdWrapper(
        self.url, self.root.root_dir, self.name, self.outbuf, out_cb,
        root=self._cipd_root, package=self._cipd_package)

  def hierarchy(self, include_url=False):
    """Extends the parent chain with this package's CIPD subdir."""
    return self.parent.hierarchy(include_url) + ' -> ' + self._cipd_subdir

  def ToLines(self):
    """Return a list of lines representing this in a DEPS file."""
    def escape_cipd_var(package):
      # '{' and '}' are CIPD var delimiters; double them to emit literally.
      return package.replace('{', '{{').replace('}', '}}')

    s = []
    self._CreatePackageIfNecessary()
    # Only the package that "owns" the subdir emits the whole entry; the
    # other packages of the same directory are folded into its list.
    if self._cipd_package.authority_for_subdir:
      condition_part = (['    "condition": %r,' % self.condition]
                        if self.condition else [])
      s.extend([
          '  # %s' % self.hierarchy(include_url=False),
          '  "%s": {' % (self.name.split(':')[0],),
          '    "packages": [',
      ])
      for p in sorted(
          self._cipd_root.packages(self._cipd_subdir),
          cmp=lambda x, y: cmp(x.name, y.name)):
        s.extend([
            '      {',
            '        "package": "%s",' % escape_cipd_var(p.name),
            '        "version": "%s",' % p.version,
            '      },',
        ])

      s.extend([
          '    ],',
          '    "dep_type": "cipd",',
      ] + condition_part + [
          '  },',
          '',
      ])
    return s
#### gclient commands.
@subcommand.usage('[command] [args ...]')
@metrics.collector.collect_metrics('gclient recurse')
def CMDrecurse(parser, args):
  """Operates [command args ...] on all the dependencies.

  Runs a shell command on all entries.
  Sets GCLIENT_DEP_PATH environment variable as the dep's relative location to
  root directory of the checkout.
  """
  # Stop parsing at the first non-arg so that these go through to the command
  parser.disable_interspersed_args()
  parser.add_option('-s', '--scm', action='append', default=[],
                    help='Choose scm types to operate upon.')
  parser.add_option('-i', '--ignore', action='store_true',
                    help='Ignore non-zero return codes from subcommands.')
  parser.add_option('--prepend-dir', action='store_true',
                    help='Prepend relative dir for use with git <cmd> --null.')
  parser.add_option('--no-progress', action='store_true',
                    help='Disable progress bar that shows sub-command updates')
  options, args = parser.parse_args(args)
  if not args:
    print('Need to supply a command!', file=sys.stderr)
    return 1
  if not gclient_utils.GetGClientRootAndEntries():
    print(
        'You need to run gclient sync at least once to use \'recurse\'.\n'
        'This is because .gclient_entries needs to exist and be up to date.',
        file=sys.stderr)
    return 1

  # Normalize options.scm to a set()
  options.scm = set(
      token for scm_arg in options.scm for token in scm_arg.split(','))

  options.nohooks = True
  client = GClient.LoadCurrentConfig(options)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  return client.RunOnDeps('recurse', args, ignore_requirements=True,
                          progress=not options.no_progress)
@subcommand.usage('[args ...]')
@metrics.collector.collect_metrics('gclient fetch')
def CMDfetch(parser, args):
  """Fetches upstream commits for all modules.

  Completely git-specific. Simply runs 'git fetch [args ...]' for each module.
  """
  options, args = parser.parse_args(args)
  recurse_args = ['--jobs=%d' % options.jobs, '--scm=git', 'git', 'fetch']
  return CMDrecurse(OptionParser(), recurse_args + args)
class Flattener(object):
"""Flattens a gclient solution."""
  def __init__(self, client, pin_all_deps=False):
    """Constructor.

    Arguments:
      client (GClient): client to flatten
      pin_all_deps (bool): whether to pin all deps, even if they're not pinned
          in DEPS
    """
    self._client = client

    # Final flattened DEPS text; filled in at the end of _flatten().
    self._deps_string = None
    # Set of (url, deps_file, hierarchy) tuples for every DEPS file visited.
    self._deps_files = set()

    # Accumulators populated while walking the dependency tree.
    self._allowed_hosts = set()
    self._deps = {}
    self._hooks = []
    self._pre_deps_hooks = []
    self._vars = {}

    self._flatten(pin_all_deps=pin_all_deps)
  @property
  def deps_string(self):
    """Flattened DEPS file contents; only valid once _flatten() has run."""
    assert self._deps_string is not None
    return self._deps_string
@property
def deps_files(self):
return self._deps_files
def _pin_dep(self, dep):
"""Pins a dependency to specific full revision sha.
Arguments:
dep (Dependency): dependency to process
"""
if dep.url is None:
return
# Make sure the revision is always fully specified (a hash),
# as opposed to refs or tags which might change. Similarly,
# shortened shas might become ambiguous; make sure to always
# use full one for pinning.
revision = gclient_utils.SplitUrlRevision(dep.url)[1]
if not revision or not gclient_utils.IsFullGitSha(revision):
dep.PinToActualRevision()
def _flatten(self, pin_all_deps=False):
"""Runs the flattener. Saves resulting DEPS string.
Arguments:
pin_all_deps (bool): whether to pin all deps, even if they're not pinned
in DEPS
"""
for solution in self._client.dependencies:
self._add_dep(solution)
self._flatten_dep(solution)
if pin_all_deps:
for dep in self._deps.itervalues():
self._pin_dep(dep)
def add_deps_file(dep):
# Only include DEPS files referenced by recursedeps.
if not dep.should_recurse:
return
deps_file = dep.deps_file
deps_path = os.path.join(self._client.root_dir, dep.name, deps_file)
if not os.path.exists(deps_path):
# gclient has a fallback that if deps_file doesn't exist, it'll try
# DEPS. Do the same here.
deps_file = 'DEPS'
deps_path = os.path.join(self._client.root_dir, dep.name, deps_file)
if not os.path.exists(deps_path):
return
assert dep.url
self._deps_files.add((dep.url, deps_file, dep.hierarchy_data()))
for dep in self._deps.itervalues():
add_deps_file(dep)
gn_args_dep = self._deps.get(self._client.dependencies[0]._gn_args_from,
self._client.dependencies[0])
self._deps_string = '\n'.join(
_GNSettingsToLines(gn_args_dep._gn_args_file, gn_args_dep._gn_args) +
_AllowedHostsToLines(self._allowed_hosts) +
_DepsToLines(self._deps) +
_HooksToLines('hooks', self._hooks) +
_HooksToLines('pre_deps_hooks', self._pre_deps_hooks) +
_VarsToLines(self._vars) +
['# %s, %s' % (url, deps_file)
for url, deps_file, _ in sorted(self._deps_files)] +
['']) # Ensure newline at end of file.
def _add_dep(self, dep):
"""Helper to add a dependency to flattened DEPS.
Arguments:
dep (Dependency): dependency to add
"""
assert dep.name not in self._deps or self._deps.get(dep.name) == dep, (
dep.name, self._deps.get(dep.name))
if dep.url:
self._deps[dep.name] = dep
def _flatten_dep(self, dep):
"""Visits a dependency in order to flatten it (see CMDflatten).
Arguments:
dep (Dependency): dependency to process
"""
logging.debug('_flatten_dep(%s)', dep.name)
assert dep.deps_parsed, (
"Attempted to flatten %s but it has not been processed." % dep.name)
self._allowed_hosts.update(dep.allowed_hosts)
# Only include vars explicitly listed in the DEPS files or gclient solution,
# not automatic, local overrides (i.e. not all of dep.get_vars()).
hierarchy = dep.hierarchy(include_url=False)
for key, value in dep._vars.iteritems():
# Make sure there are no conflicting variables. It is fine however
# to use same variable name, as long as the value is consistent.
assert key not in self._vars or self._vars[key][1] == value
self._vars[key] = (hierarchy, value)
# Override explicit custom variables.
for key, value in dep.custom_vars.iteritems():
# Do custom_vars that don't correspond to DEPS vars ever make sense? DEPS
# conditionals shouldn't be using vars that aren't also defined in the
# DEPS (presubmit actually disallows this), so any new custom_var must be
# unused in the DEPS, so no need to add it to the flattened output either.
if key not in self._vars:
continue
# Don't "override" existing vars if it's actually the same value.
elif self._vars[key][1] == value:
continue
# Anything else is overriding a default value from the DEPS.
self._vars[key] = (hierarchy + ' [custom_var override]', value)
self._pre_deps_hooks.extend([(dep, hook) for hook in dep.pre_deps_hooks])
self._hooks.extend([(dep, hook) for hook in dep.deps_hooks])
for sub_dep in dep.dependencies:
self._add_dep(sub_dep)
for d in dep.dependencies:
if d.should_recurse:
self._flatten_dep(d)
@metrics.collector.collect_metrics('gclient flatten')
def CMDflatten(parser, args):
  """Flattens the solutions into a single DEPS file."""
  parser.add_option('--output-deps', help='Path to the output DEPS file')
  parser.add_option(
      '--output-deps-files',
      help=('Path to the output metadata about DEPS files referenced by '
            'recursedeps.'))
  parser.add_option(
      '--pin-all-deps', action='store_true',
      help=('Pin all deps, even if not pinned in DEPS. CAVEAT: only does so '
            'for checked out deps, NOT deps_os.'))
  options, args = parser.parse_args(args)

  options.nohooks = True
  options.process_all_deps = True
  client = GClient.LoadCurrentConfig(options)
  # Fail with a clear error when no .gclient is configured, like every other
  # subcommand does, instead of crashing with AttributeError on None below.
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')

  # Only print progress if we're writing to a file. Otherwise, progress updates
  # could obscure intended output.
  code = client.RunOnDeps('flatten', args, progress=options.output_deps)
  if code != 0:
    return code

  flattener = Flattener(client, pin_all_deps=options.pin_all_deps)

  if options.output_deps:
    with open(options.output_deps, 'w') as f:
      f.write(flattener.deps_string)
  else:
    print(flattener.deps_string)

  # Emit machine-readable metadata about every DEPS file that contributed.
  deps_files = [{'url': d[0], 'deps_file': d[1], 'hierarchy': d[2]}
                for d in sorted(flattener.deps_files)]
  if options.output_deps_files:
    with open(options.output_deps_files, 'w') as f:
      json.dump(deps_files, f)

  return 0
def _GNSettingsToLines(gn_args_file, gn_args):
s = []
if gn_args_file:
s.extend([
'gclient_gn_args_file = "%s"' % gn_args_file,
'gclient_gn_args = %r' % gn_args,
])
return s
def _AllowedHostsToLines(allowed_hosts):
"""Converts |allowed_hosts| set to list of lines for output."""
if not allowed_hosts:
return []
s = ['allowed_hosts = [']
for h in sorted(allowed_hosts):
s.append(' "%s",' % h)
s.extend([']', ''])
return s
def _DepsToLines(deps):
"""Converts |deps| dict to list of lines for output."""
if not deps:
return []
s = ['deps = {']
for _, dep in sorted(deps.iteritems()):
s.extend(dep.ToLines())
s.extend(['}', ''])
return s
def _DepsOsToLines(deps_os):
"""Converts |deps_os| dict to list of lines for output."""
if not deps_os:
return []
s = ['deps_os = {']
for dep_os, os_deps in sorted(deps_os.iteritems()):
s.append(' "%s": {' % dep_os)
for name, dep in sorted(os_deps.iteritems()):
condition_part = ([' "condition": %r,' % dep.condition]
if dep.condition else [])
s.extend([
' # %s' % dep.hierarchy(include_url=False),
' "%s": {' % (name,),
' "url": "%s",' % (dep.url,),
] + condition_part + [
' },',
'',
])
s.extend([' },', ''])
s.extend(['}', ''])
return s
def _HooksToLines(name, hooks):
"""Converts |hooks| list to list of lines for output."""
if not hooks:
return []
s = ['%s = [' % name]
for dep, hook in hooks:
s.extend([
' # %s' % dep.hierarchy(include_url=False),
' {',
])
if hook.name is not None:
s.append(' "name": "%s",' % hook.name)
if hook.pattern is not None:
s.append(' "pattern": "%s",' % hook.pattern)
if hook.condition is not None:
s.append(' "condition": %r,' % hook.condition)
# Flattened hooks need to be written relative to the root gclient dir
cwd = os.path.relpath(os.path.normpath(hook.effective_cwd))
s.extend(
[' "cwd": "%s",' % cwd] +
[' "action": ['] +
[' "%s",' % arg for arg in hook.action] +
[' ]', ' },', '']
)
s.extend([']', ''])
return s
def _HooksOsToLines(hooks_os):
"""Converts |hooks| list to list of lines for output."""
if not hooks_os:
return []
s = ['hooks_os = {']
for hook_os, os_hooks in hooks_os.iteritems():
s.append(' "%s": [' % hook_os)
for dep, hook in os_hooks:
s.extend([
' # %s' % dep.hierarchy(include_url=False),
' {',
])
if hook.name is not None:
s.append(' "name": "%s",' % hook.name)
if hook.pattern is not None:
s.append(' "pattern": "%s",' % hook.pattern)
if hook.condition is not None:
s.append(' "condition": %r,' % hook.condition)
# Flattened hooks need to be written relative to the root gclient dir
cwd = os.path.relpath(os.path.normpath(hook.effective_cwd))
s.extend(
[' "cwd": "%s",' % cwd] +
[' "action": ['] +
[' "%s",' % arg for arg in hook.action] +
[' ]', ' },', '']
)
s.extend([' ],', ''])
s.extend(['}', ''])
return s
def _VarsToLines(variables):
"""Converts |variables| dict to list of lines for output."""
if not variables:
return []
s = ['vars = {']
for key, tup in sorted(variables.iteritems()):
hierarchy, value = tup
s.extend([
' # %s' % hierarchy,
' "%s": %r,' % (key, value),
'',
])
s.extend(['}', ''])
return s
@metrics.collector.collect_metrics('gclient grep')
def CMDgrep(parser, args):
  """Greps through git repos managed by gclient.
  Runs 'git grep [args...]' for each module.
  """
  # We can't use optparse because it will try to parse arguments sent
  # to git grep and throw an error. :-(
  if not args or re.match('(-h|--help)$', args[0]):
    print(
        'Usage: gclient grep [-j <N>] git-grep-args...\n\n'
        'Example: "gclient grep -j10 -A2 RefCountedBase" runs\n"git grep '
        '-A2 RefCountedBase" on each of gclient\'s git\nrepos with up to '
        '10 jobs.\n\nBonus: page output by appending "|& less -FRSX" to the'
        ' end of your query.',
        file=sys.stderr)
    return 1

  # Peel off a leading -jN / --jobs=N (one token) or -j N / --jobs N (two
  # tokens) before handing the rest straight to git grep.
  if re.match(r'(-j|--jobs=)\d+$', args[0]):
    jobs_arg, args = args[:1], args[1:]
  elif re.match(r'(-j|--jobs)$', args[0]):
    jobs_arg, args = args[:2], args[2:]
  else:
    jobs_arg = ['--jobs=1']

  return CMDrecurse(
      parser,
      jobs_arg + ['--ignore', '--prepend-dir', '--no-progress', '--scm=git',
                  'git', 'grep', '--null', '--color=Always'] + args)
@metrics.collector.collect_metrics('gclient root')
def CMDroot(parser, args):
  """Outputs the solution root (or current dir if there isn't one)."""
  options, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(options)
  # Fall back to the current directory when no .gclient file is found.
  root = client.root_dir if client else '.'
  print(os.path.abspath(root))
@subcommand.usage('[url]')
@metrics.collector.collect_metrics('gclient config')
def CMDconfig(parser, args):
  """Creates a .gclient file in the current directory.
  This specifies the configuration for further commands. After update/sync,
  top-level DEPS files in each module are read to determine dependent
  modules to operate on as well. If optional [url] parameter is
  provided, then configuration is read from a specified Subversion server
  URL.
  """
  # We do a little dance with the --gclientfile option. 'gclient config' is the
  # only command where it's acceptable to have both '--gclientfile' and '--spec'
  # arguments. So, we temporarily stash any --gclientfile parameter into
  # options.output_config_file until after the (gclientfile xor spec) error
  # check.
  parser.remove_option('--gclientfile')
  parser.add_option('--gclientfile', dest='output_config_file',
                    help='Specify an alternate .gclient file')
  parser.add_option('--name',
                    help='overrides the default name for the solution')
  parser.add_option('--deps-file', default='DEPS',
                    help='overrides the default name for the DEPS file for the '
                         'main solutions and all sub-dependencies')
  parser.add_option('--unmanaged', action='store_true', default=False,
                    help='overrides the default behavior to make it possible '
                         'to have the main solution untouched by gclient '
                         '(gclient will check out unmanaged dependencies but '
                         'will never sync them)')
  parser.add_option('--cache-dir', default=UNSET_CACHE_DIR,
                    help='Cache all git repos into this dir and do shared '
                         'clones from the cache, instead of cloning directly '
                         'from the remote. Pass "None" to disable cache, even '
                         'if globally enabled due to $GIT_CACHE_PATH.')
  parser.add_option('--custom-var', action='append', dest='custom_vars',
                    default=[],
                    help='overrides variables; key=value syntax')
  parser.set_defaults(config_filename=None)
  (options, args) = parser.parse_args(args)
  # Move the stashed --gclientfile value back to its canonical option name,
  # now that the (--spec xor args) check below can run.
  if options.output_config_file:
    setattr(options, 'config_filename', getattr(options, 'output_config_file'))
  if ((options.spec and args) or len(args) > 2 or
      (not options.spec and not args)):
    parser.error('Inconsistent arguments. Use either --spec or one or 2 args')
  # The literal string "none" (case-insensitive) disables the git cache.
  if (options.cache_dir is not UNSET_CACHE_DIR
      and options.cache_dir.lower() == 'none'):
    options.cache_dir = None
  # Parse --custom-var key=value pairs; the value is evaluated the same way
  # as a DEPS condition expression.
  custom_vars = {}
  for arg in options.custom_vars:
    kv = arg.split('=', 1)
    if len(kv) != 2:
      parser.error('Invalid --custom-var argument: %r' % arg)
    custom_vars[kv[0]] = gclient_eval.EvaluateCondition(kv[1], {})
  client = GClient('.', options)
  if options.spec:
    client.SetConfig(options.spec)
  else:
    base_url = args[0].rstrip('/')
    if not options.name:
      # Derive the checkout name from the last URL component, dropping a
      # trailing '.git' suffix.
      name = base_url.split('/')[-1]
      if name.endswith('.git'):
        name = name[:-4]
    else:
      # specify an alternate relpath for the given URL.
      name = options.name
    # Reject names that would place the checkout outside the current dir.
    if not os.path.abspath(os.path.join(os.getcwd(), name)).startswith(
        os.getcwd()):
      parser.error('Do not pass a relative path for --name.')
    if any(x in ('..', '.', '/', '\\') for x in name.split(os.sep)):
      parser.error('Do not include relative path components in --name.')
    deps_file = options.deps_file
    client.SetDefaultConfig(name, deps_file, base_url,
                            managed=not options.unmanaged,
                            cache_dir=options.cache_dir,
                            custom_vars=custom_vars)
  client.SaveConfig()
  return 0
@subcommand.epilog("""Example:
gclient pack > patch.txt
generate simple patch for configured client and dependences
""")
@metrics.collector.collect_metrics('gclient pack')
def CMDpack(parser, args):
"""Generates a patch which can be applied at the root of the tree.
Internally, runs 'git diff' on each checked out module and
dependencies, and performs minimal postprocessing of the output. The
resulting patch is printed to stdout and can be applied to a freshly
checked out tree via 'patch -p0 < patchfile'.
"""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.remove_option('--jobs')
(options, args) = parser.parse_args(args)
# Force jobs to 1 so the stdout is not annotated with the thread ids
options.jobs = 1
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('pack', args)
@metrics.collector.collect_metrics('gclient status')
def CMDstatus(parser, args):
  """Shows modification status for every dependencies."""
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  opts, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(opts)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if opts.verbose:
    client.PrintLocationAndContents()
  # The per-dependency work is delegated to the 'status' operation.
  return client.RunOnDeps('status', args)
@subcommand.epilog("""Examples:
gclient sync
update files from SCM according to current configuration,
*for modules which have changed since last update or sync*
gclient sync --force
update files from SCM according to current configuration, for
all modules (useful for recovering files deleted from local copy)
gclient sync --revision src@31000
update src directory to r31000
JSON output format:
If the --output-json option is specified, the following document structure will
be emitted to the provided file. 'null' entries may occur for subprojects which
are present in the gclient solution, but were not processed (due to custom_deps,
os_deps, etc.)
{
"solutions" : {
"<name>": { # <name> is the posix-normalized path to the solution.
"revision": [<git id hex string>|null],
"scm": ["git"|null],
}
}
}
""")
@metrics.collector.collect_metrics('gclient sync')
def CMDsync(parser, args):
"""Checkout/update all modules."""
parser.add_option('-f', '--force', action='store_true',
help='force update even for unchanged modules')
parser.add_option('-n', '--nohooks', action='store_true',
help='don\'t run hooks after the update is complete')
parser.add_option('-p', '--noprehooks', action='store_true',
help='don\'t run pre-DEPS hooks', default=False)
parser.add_option('-r', '--revision', action='append',
dest='revisions', metavar='REV', default=[],
help='Enforces revision/hash for the solutions with the '
'format src@rev. The src@ part is optional and can be '
'skipped. You can also specify URLs instead of paths '
'and gclient will find the solution corresponding to '
'the given URL. If a path is also specified, the URL '
'takes precedence. -r can be used multiple times when '
'.gclient has multiple solutions configured, and will '
'work even if the src@ part is skipped.')
parser.add_option('--patch-ref', action='append',
dest='patch_refs', metavar='GERRIT_REF', default=[],
help='Patches the given reference with the format '
'dep@[target-ref:]patch-ref. '
'For |dep|, you can specify URLs as well as paths, '
'with URLs taking preference. '
'|patch-ref| will be applied to |dep|, rebased on top '
'of what |dep| was synced to, and a soft reset will '
'be done. Use --no-rebase-patch-ref and '
'--no-reset-patch-ref to disable this behavior. '
'|target-ref| is the target branch against which a '
'patch was created, it is used to determine which '
'commits from the |patch-ref| actually constitute a '
'patch. If not given, we will iterate over all remote '
'branches and select one that contains the revision '
'|dep| is synced at. '
'WARNING: |target-ref| will be mandatory soon.')
parser.add_option('--with_branch_heads', action='store_true',
help='Clone git "branch_heads" refspecs in addition to '
'the default refspecs. This adds about 1/2GB to a '
'full checkout. (git only)')
parser.add_option('--with_tags', action='store_true',
help='Clone git tags in addition to the default refspecs.')
parser.add_option('-H', '--head', action='store_true',
help='DEPRECATED: only made sense with safesync urls.')
parser.add_option('-D', '--delete_unversioned_trees', action='store_true',
help='Deletes from the working copy any dependencies that '
'have been removed since the last sync, as long as '
'there are no local modifications. When used with '
'--force, such dependencies are removed even if they '
'have local modifications. When used with --reset, '
'all untracked directories are removed from the '
'working copy, excluding those which are explicitly '
'ignored in the repository.')
parser.add_option('-R', '--reset', action='store_true',
help='resets any local changes before updating (git only)')
parser.add_option('-M', '--merge', action='store_true',
help='merge upstream changes instead of trying to '
'fast-forward or rebase')
parser.add_option('-A', '--auto_rebase', action='store_true',
help='Automatically rebase repositories against local '
'checkout during update (git only).')
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('--process-all-deps', action='store_true',
help='Check out all deps, even for different OS-es, '
'or with conditions evaluating to false')
parser.add_option('--upstream', action='store_true',
help='Make repo state match upstream branch.')
parser.add_option('--output-json',
help='Output a json document to this path containing '
'summary information about the sync.')
parser.add_option('--no-history', action='store_true',
help='GIT ONLY - Reduces the size/time of the checkout at '
'the cost of no history. Requires Git 1.9+')
parser.add_option('--shallow', action='store_true',
help='GIT ONLY - Do a shallow clone into the cache dir. '
'Requires Git 1.9+')
parser.add_option('--no_bootstrap', '--no-bootstrap',
action='store_true',
help='Don\'t bootstrap from Google Storage.')
parser.add_option('--ignore_locks', action='store_true',
help='GIT ONLY - Ignore cache locks.')
parser.add_option('--break_repo_locks', action='store_true',
help='GIT ONLY - Forcibly remove repo locks (e.g. '
'index.lock). This should only be used if you know for '
'certain that this invocation of gclient is the only '
'thing operating on the git repos (e.g. on a bot).')
parser.add_option('--lock_timeout', type='int', default=5000,
help='GIT ONLY - Deadline (in seconds) to wait for git '
'cache lock to become available. Default is %default.')
# TODO(agable): Remove these when the oldest CrOS release milestone is M56.
parser.add_option('-t', '--transitive', action='store_true',
help='DEPRECATED: This is a no-op.')
parser.add_option('-m', '--manually_grab_svn_rev', action='store_true',
help='DEPRECATED: This is a no-op.')
# TODO(phajdan.jr): Remove validation options once default (crbug/570091).
parser.add_option('--validate-syntax', action='store_true', default=True,
help='Validate the .gclient and DEPS syntax')
parser.add_option('--disable-syntax-validation', action='store_false',
dest='validate_syntax',
help='Disable validation of .gclient and DEPS syntax.')
parser.add_option('--no-rebase-patch-ref', action='store_false',
dest='rebase_patch_ref', default=True,
help='Bypass rebase of the patch ref after checkout.')
parser.add_option('--no-reset-patch-ref', action='store_false',
dest='reset_patch_ref', default=True,
help='Bypass calling reset after patching the ref.')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.revisions and options.head:
# TODO(maruel): Make it a parser.error if it doesn't break any builder.
print('Warning: you cannot use both --head and --revision')
if options.verbose:
client.PrintLocationAndContents()
ret = client.RunOnDeps('update', args)
if options.output_json:
slns = {}
for d in client.subtree(True):
normed = d.name.replace('\\', '/').rstrip('/') + '/'
slns[normed] = {
'revision': d.got_revision,
'scm': d.used_scm.name if d.used_scm else None,
'url': str(d.url) if d.url else None,
'was_processed': d.should_process,
}
with open(options.output_json, 'wb') as f:
json.dump({'solutions': slns}, f)
return ret
CMDupdate = CMDsync
@metrics.collector.collect_metrics('gclient validate')
def CMDvalidate(parser, args):
  """Validates the .gclient and DEPS syntax."""
  options, args = parser.parse_args(args)
  options.validate_syntax = True
  client = GClient.LoadCurrentConfig(options)
  # Raise the same error as every other subcommand when no .gclient exists,
  # instead of crashing with AttributeError on None below.
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  rv = client.RunOnDeps('validate', args)
  if rv == 0:
    print('validate: SUCCESS')
  else:
    print('validate: FAILURE')
  return rv
@metrics.collector.collect_metrics('gclient diff')
def CMDdiff(parser, args):
  """Displays local diff for every dependencies."""
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  opts, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(opts)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if opts.verbose:
    client.PrintLocationAndContents()
  # Run the 'diff' operation across every dependency.
  return client.RunOnDeps('diff', args)
@metrics.collector.collect_metrics('gclient revert')
def CMDrevert(parser, args):
  """Reverts all modifications in every dependencies.
  That's the nuclear option to get back to a 'clean' state. It removes anything
  that shows up in git status."""
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  parser.add_option('-n', '--nohooks', action='store_true',
                    help='don\'t run hooks after the revert is complete')
  parser.add_option('-p', '--noprehooks', action='store_true',
                    help='don\'t run pre-DEPS hooks', default=False)
  parser.add_option('--upstream', action='store_true',
                    help='Make repo state match upstream branch.')
  parser.add_option('--break_repo_locks', action='store_true',
                    help='GIT ONLY - Forcibly remove repo locks (e.g. '
                         'index.lock). This should only be used if you know for '
                         'certain that this invocation of gclient is the only '
                         'thing operating on the git repos (e.g. on a bot).')
  opts, args = parser.parse_args(args)
  # --force is implied.
  opts.force = True
  # Revert never resets, merges, or deletes unversioned trees on its own.
  for attr in ('reset', 'delete_unversioned_trees', 'merge'):
    setattr(opts, attr, False)
  client = GClient.LoadCurrentConfig(opts)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  return client.RunOnDeps('revert', args)
@metrics.collector.collect_metrics('gclient runhooks')
def CMDrunhooks(parser, args):
  """Runs hooks for files that have been modified in the local working copy."""
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  parser.add_option('-f', '--force', action='store_true', default=True,
                    help='Deprecated. No effect.')
  opts, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(opts)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if opts.verbose:
    client.PrintLocationAndContents()
  # Running hooks is the whole point: force them on regardless of flags.
  opts.force = True
  opts.nohooks = False
  return client.RunOnDeps('runhooks', args)
@metrics.collector.collect_metrics('gclient revinfo')
def CMDrevinfo(parser, args):
  """Outputs revision info mapping for the client and its dependencies.
  This allows the capture of an overall 'revision' for the source tree that
  can be used to reproduce the same tree in the future. It is only useful for
  'unpinned dependencies', i.e. DEPS/deps references without a git hash.
  A git branch name isn't 'pinned' since the actual commit can change.
  """
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  parser.add_option('-a', '--actual', action='store_true',
                    help='gets the actual checked out revisions instead of the '
                         'ones specified in the DEPS and .gclient files')
  parser.add_option('-s', '--snapshot', action='store_true',
                    help='creates a snapshot .gclient file of the current '
                         'version of all repositories to reproduce the tree, '
                         'implies -a')
  parser.add_option('--filter', action='append', dest='filter',
                    help='Display revision information only for the specified '
                         'dependencies (filtered by URL or path).')
  parser.add_option('--output-json',
                    help='Output a json document to this path containing '
                         'information about the revisions.')
  parser.add_option('--ignore-dep-type', choices=['git', 'cipd'],
                    help='Specify to skip processing of a certain type of dep.')
  opts, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(opts)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  # All reporting is delegated to the client object.
  client.PrintRevInfo()
  return 0
@metrics.collector.collect_metrics('gclient getdep')
def CMDgetdep(parser, args):
  """Gets revision information and variable values from a DEPS file."""
  parser.add_option('--var', action='append',
                    dest='vars', metavar='VAR', default=[],
                    help='Gets the value of a given variable.')
  parser.add_option('-r', '--revision', action='append',
                    dest='revisions', metavar='DEP', default=[],
                    help='Gets the revision/version for the given dependency. '
                         'If it is a git dependency, dep must be a path. If it '
                         'is a CIPD dependency, dep must be of the form '
                         'path:package.')
  parser.add_option('--deps-file', default='DEPS',
                    # TODO(ehmaldonado): Try to find the DEPS file pointed by
                    # .gclient first.
                    help='The DEPS file to be edited. Defaults to the DEPS '
                         'file in the current directory.')
  (options, args) = parser.parse_args(args)

  if not os.path.isfile(options.deps_file):
    raise gclient_utils.Error(
        'DEPS file %s does not exist.' % options.deps_file)
  with open(options.deps_file) as f:
    contents = f.read()
  local_scope = gclient_eval.Exec(contents, options.deps_file)

  # Print requested variable values first, then revisions.
  for var in options.vars:
    print(gclient_eval.GetVar(local_scope, var))

  for name in options.revisions:
    if ':' not in name:
      print(gclient_eval.GetRevision(local_scope, name))
      continue
    # A "path:package" name selects a CIPD package revision.
    name, _, package = name.partition(':')
    if not name or not package:
      parser.error(
          'Wrong CIPD format: %s:%s should be of the form path:pkg.'
          % (name, package))
    print(gclient_eval.GetCIPD(local_scope, name, package))
@metrics.collector.collect_metrics('gclient setdep')
def CMDsetdep(parser, args):
  """Modifies dependency revisions and variable values in a DEPS file"""
  parser.add_option('--var', action='append',
                    dest='vars', metavar='VAR=VAL', default=[],
                    help='Sets a variable to the given value with the format '
                         'name=value.')
  parser.add_option('-r', '--revision', action='append',
                    dest='revisions', metavar='DEP@REV', default=[],
                    help='Sets the revision/version for the dependency with '
                         'the format dep@rev. If it is a git dependency, dep '
                         'must be a path and rev must be a git hash or '
                         'reference (e.g. src/dep@deadbeef). If it is a CIPD '
                         'dependency, dep must be of the form path:package and '
                         'rev must be the package version '
                         '(e.g. src/pkg:chromium/pkg@2.1-cr0).')
  parser.add_option('--deps-file', default='DEPS',
                    # TODO(ehmaldonado): Try to find the DEPS file pointed by
                    # .gclient first.
                    help='The DEPS file to be edited. Defaults to the DEPS '
                         'file in the current directory.')
  (options, args) = parser.parse_args(args)
  if args:
    parser.error('Unused arguments: "%s"' % '" "'.join(args))
  if not options.revisions and not options.vars:
    parser.error(
        'You must specify at least one variable or revision to modify.')
  if not os.path.isfile(options.deps_file):
    raise gclient_utils.Error(
        'DEPS file %s does not exist.' % options.deps_file)
  with open(options.deps_file) as f:
    contents = f.read()
  # Parse the DEPS file into an editable scope via gclient_eval.
  local_scope = gclient_eval.Exec(contents, options.deps_file)
  # Apply --var name=value assignments: update existing vars, add new ones.
  for var in options.vars:
    name, _, value = var.partition('=')
    if not name or not value:
      parser.error(
          'Wrong var format: %s should be of the form name=value.' % var)
    if name in local_scope['vars']:
      gclient_eval.SetVar(local_scope, name, value)
    else:
      gclient_eval.AddVar(local_scope, name, value)
  # Apply -r dep@rev updates; a "path:package@version" form targets CIPD.
  for revision in options.revisions:
    name, _, value = revision.partition('@')
    if not name or not value:
      parser.error(
          'Wrong dep format: %s should be of the form dep@rev.' % revision)
    if ':' in name:
      name, _, package = name.partition(':')
      if not name or not package:
        parser.error(
            'Wrong CIPD format: %s:%s should be of the form path:pkg@version.'
            % (name, package))
      gclient_eval.SetCIPD(local_scope, name, package, value)
    else:
      gclient_eval.SetRevision(local_scope, name, value)
  # Render the edited scope back out to the same DEPS file.
  with open(options.deps_file, 'w') as f:
    f.write(gclient_eval.RenderDEPSFile(local_scope))
@metrics.collector.collect_metrics('gclient verify')
def CMDverify(parser, args):
  """Verifies the DEPS file deps are only from allowed_hosts."""
  opts, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(opts)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  client.RunOnDeps(None, [])
  # Look at each first-level dependency of this gclient only.
  for dep in client.dependencies:
    violations = dep.findDepsFromNotAllowedHosts()
    if not violations:
      continue
    # Report every offender, then abort with a non-zero exit via Error.
    print("There are deps from not allowed hosts in file %s" % dep.deps_file)
    for bad_dep in violations:
      print("\t%s at %s" % (bad_dep.name, bad_dep.url))
    print("allowed_hosts:", ', '.join(dep.allowed_hosts))
    sys.stdout.flush()
    raise gclient_utils.Error(
        'dependencies from disallowed hosts; check your DEPS file.')
  return 0
@subcommand.epilog("""For more information on what metrics are we collecting and
why, please read metrics.README.md or visit https://bit.ly/2ufRS4p""")
@metrics.collector.collect_metrics('gclient metrics')
def CMDmetrics(parser, args):
"""Reports, and optionally modifies, the status of metric collection."""
parser.add_option('--opt-in', action='store_true', dest='enable_metrics',
help='Opt-in to metrics collection.',
default=None)
parser.add_option('--opt-out', action='store_false', dest='enable_metrics',
help='Opt-out of metrics collection.')
options, args = parser.parse_args(args)
if args:
parser.error('Unused arguments: "%s"' % '" "'.join(args))
if not metrics.collector.config.is_googler:
print("You're not a Googler. Metrics collection is disabled for you.")
return 0
if options.enable_metrics is not None:
metrics.collector.config.opted_in = options.enable_metrics
if metrics.collector.config.opted_in is None:
print("You haven't opted in or out of metrics collection.")
elif metrics.collector.config.opted_in:
print("You have opted in. Thanks!")
else:
print("You have opted out. Please consider opting in.")
return 0
class OptionParser(optparse.OptionParser):
  """Shared option parser for all gclient subcommands.

  Installs the flags every subcommand accepts (-j/--jobs, -v/--verbose,
  --gclientfile, --spec, --no-nag-max) and normalizes the parsed options
  in parse_args().
  """

  # Default config file name; overridable via the GCLIENT_FILE env var.
  gclientfile_default = os.environ.get('GCLIENT_FILE', '.gclient')

  def __init__(self, **kwargs):
    optparse.OptionParser.__init__(
        self, version='%prog ' + __version__, **kwargs)

    # Some arm boards have issues with parallel sync.
    if platform.machine().startswith('arm'):
      jobs = 1
    else:
      jobs = max(8, gclient_utils.NumLocalCpus())

    self.add_option(
        '-j', '--jobs', default=jobs, type='int',
        help='Specify how many SCM commands can run in parallel; defaults to '
             '%default on this machine')
    self.add_option(
        '-v', '--verbose', action='count', default=0,
        help='Produces additional output for diagnostics. Can be used up to '
             'three times for more logging info.')
    self.add_option(
        '--gclientfile', dest='config_filename',
        help='Specify an alternate %s file' % self.gclientfile_default)
    self.add_option(
        '--spec',
        help='create a gclient file containing the provided string. Due to '
             'Cygwin/Python brokenness, it can\'t contain any newlines.')
    self.add_option(
        '--no-nag-max', default=False, action='store_true',
        help='Ignored for backwards compatibility.')

  def parse_args(self, args=None, _values=None):
    """Integrates standard options processing.

    Returns:
      (options, args): defaults merged with the flags the user actually
      passed, plus a few attributes some subcommands expect unconditionally
      (revisions, head, nohooks, ...).
    """
    # Create an optparse.Values object that will store only the actual passed
    # options, without the defaults.
    actual_options = optparse.Values()
    _, args = optparse.OptionParser.parse_args(self, args, actual_options)
    # Create an optparse.Values object with the default options.
    options = optparse.Values(self.get_default_values().__dict__)
    # Update it with the options passed by the user.
    options._update_careful(actual_options.__dict__)
    # Store the options passed by the user in an _actual_options attribute.
    # We store only the keys, and not the values, since the values can contain
    # arbitrary information, which might be PII.
    metrics.collector.add('arguments', actual_options.__dict__.keys())

    levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
    logging.basicConfig(
        level=levels[min(options.verbose, len(levels) - 1)],
        format='%(module)s(%(lineno)d) %(funcName)s:%(message)s')

    # --gclientfile and --spec are mutually exclusive ways of providing the
    # config, and a --gclientfile must be a bare filename, not a path.
    if options.config_filename and options.spec:
      self.error('Cannot specify both --gclientfile and --spec')
    if (options.config_filename and
        options.config_filename != os.path.basename(options.config_filename)):
      self.error('--gclientfile target must be a filename, not a path')
    if not options.config_filename:
      options.config_filename = self.gclientfile_default
    options.entries_filename = options.config_filename + '_entries'
    if options.jobs < 1:
      self.error('--jobs must be 1 or higher')

    # These hacks need to die.
    if not hasattr(options, 'revisions'):
      # GClient.RunOnDeps expects it even if not applicable.
      options.revisions = []
    if not hasattr(options, 'head'):
      options.head = None
    if not hasattr(options, 'nohooks'):
      options.nohooks = True
    if not hasattr(options, 'noprehooks'):
      options.noprehooks = True
    if not hasattr(options, 'deps_os'):
      options.deps_os = None
    if not hasattr(options, 'force'):
      options.force = None
    return (options, args)
def disable_buffering():
  """Wrap sys.stdout so it auto-flushes and annotates lines with thread ids."""
  # Auto-flush so buildbot doesn't kill us during lengthy operations;
  # Python has a strong tendency to buffer sys.stdout.
  flushing_stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  # Annotate output with the id of the emitting thread.
  sys.stdout = gclient_utils.MakeFileAnnotated(flushing_stdout)
def main(argv):
  """Doesn't parse the arguments here, just finds the right subcommand to
  execute."""
  if sys.hexversion < 0x02060000:
    print(
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0],
        file=sys.stderr)
    return 2
  if not sys.executable:
    print(
        '\nPython cannot find the location of it\'s own executable.\n',
        file=sys.stderr)
    return 2

  fix_encoding.fix_encoding()
  disable_buffering()
  setup_color.init()

  try:
    dispatcher = subcommand.CommandDispatcher(__name__)
    return dispatcher.execute(OptionParser(), argv)
  except KeyboardInterrupt:
    # Take the child processes down with us, then let the interrupt propagate.
    gclient_utils.GClientChildren.KillAllRemainingChildren()
    raise
  except (gclient_utils.Error, subprocess2.CalledProcessError) as err:
    print('Error: %s' % str(err), file=sys.stderr)
    return 1
  finally:
    gclient_utils.PrintWarnings()
  return 0  # NOTE(review): unreachable — every path above returns or raises.
if '__main__' == __name__:
  # Show the metrics collection notice when required, then exit with the
  # subcommand's return code.
  with metrics.collector.print_notice_and_exit():
    sys.exit(main(sys.argv[1:]))

# vim: ts=2:sw=2:tw=80:et:
|
# coding: utf-8
import os
# Make the repository root importable so `fbmq` and `example.config`
# resolve when this script is run directly from the examples directory.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)  # NOTE(review): os.sys is just sys; prefer `import sys` directly.
from fbmq import Page
from example.config import CONFIG
# Messenger Page client authenticated with the page access token from config.
page = Page(CONFIG['FACEBOOK_TOKEN'])
@page.after_send
def after_send(payload, response):
    """Log each delivered message: the request payload and the HTTP response."""
    sent_json = payload.to_json()
    print('AFTER_SEND : ' + sent_json)
    print('RESPONSE : ' + response.text)
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent utilities, incl. choosing the move and running in separate process."""
import collections
import functools
import multiprocessing
from typing import Any, Callable
import flax
import jax
import numpy as np
import env_utils
@functools.partial(jax.jit, static_argnums=0)
def policy_action(
    apply_fn: Callable[..., Any],
    params: flax.core.frozen_dict.FrozenDict,
    state: np.ndarray):
  """Forward pass of the actor-critic network.

  Args:
    apply_fn: the actor-critic model's apply function (static under jit,
      so each distinct function triggers its own compilation)
    params: the parameters of the actor-critic model
    state: the input batch for the forward pass

  Returns:
    out: whatever apply_fn produces — per the original docs, a tuple
      (log_probabilities, values)
  """
  out = apply_fn({'params': params}, state)
  return out
# One agent-environment transition: state, chosen action, reward received,
# critic value estimate, log-probability of the action, and terminal flag.
ExpTuple = collections.namedtuple(
    'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])
class RemoteSimulator:
  """Wrap functionality for an agent emulating Atari in a separate process.

  An object of this class is created for every agent.
  """

  def __init__(self, game: str):
    """Start the remote process and create Pipe() to communicate with it."""
    parent_end, child_end = multiprocessing.Pipe()
    worker = multiprocessing.Process(
        target=rcv_action_send_exp, args=(child_end, game))
    # Daemonize so the worker dies with the main learner process.
    worker.daemon = True
    self.proc = worker
    self.conn = parent_end
    self.proc.start()
def rcv_action_send_exp(conn, game: str):
  """Run the remote agents.

  Receive action from the main learner, perform one step of simulation and
  send back collected experience.
  """
  env = env_utils.create_env(game, clip_rewards=True)
  while True:
    observation = env.reset()
    # Observations fetched from the Atari env need an extra batch dimension.
    state = observation[None, ...]
    done = False
    while not done:
      # Protocol per step: send state, receive action, send experience.
      conn.send(state)
      action = conn.recv()
      observation, reward, done, _ = env.step(action)
      next_state = None if done else observation[None, ...]
      conn.send((state, action, reward, done))
      if done:
        break
      state = next_state
|
# This is an auto-generated file. Do not edit it.
from twisted.python import versions

# Version object for the twisted.conch subproject (12.3.0), consumed by
# Twisted's packaging/version machinery.
version = versions.Version('twisted.conch', 12, 3, 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.