135522
|
# Prefer PySide if available, otherwise fall back to PyQt4 (both provide QtGui).
try:
    from PySide import QtGui
except ImportError:
    from PyQt4 import QtGui
import dictionaries.constants as cs
################################################################################
class InfoDialog(QtGui.QWidget):
def __init__(self, info_message):
super(InfoDialog, self).__init__()
self.build_label(info_message)
self.build_button()
self.build_layout()
def build_layout(self):
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self._bn_close_info_dialog)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self._lb_info_message)
vbox.addStretch(1)
vbox.addLayout(hbox)
self.setLayout(vbox)
self.resize(*cs.RESIZE_INFO)
self.move(*cs.MOVE_INFO)
self.setWindowTitle(cs.WINDOW_TITLE_INFO)
def build_label(self, info_message):
self._lb_info_message = QtGui.QLabel(info_message, self)
self._lb_info_message.setFont(QtGui.QFont(cs.FONT, cs.FONTSIZE-1))
self._lb_info_message.adjustSize()
self._lb_info_message.move(30*1.0/2, 30*1.0/2)
def build_button(self):
label = "Close"
self._bn_close_info_dialog = QtGui.QPushButton(label)
self._bn_close_info_dialog.setFont(QtGui.QFont(cs.FONT, cs.FONTSIZE))
self._bn_close_info_dialog.adjustSize()
self._bn_close_info_dialog.clicked.connect(self.bn_close_info_dialog)
def bn_close_info_dialog(self):
self.close()
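# Minimal usage sketch (not part of the original module); it assumes the imported
# dictionaries.constants module provides FONT, FONTSIZE, RESIZE_INFO, MOVE_INFO
# and WINDOW_TITLE_INFO, and that a Qt binding is installed.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = InfoDialog("Operation finished successfully.")
    dialog.show()
    sys.exit(app.exec_())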
|
135529
|
def is_prime(num, primes):
for prime in primes:
if prime == num:
return True
if not num % prime:
return False
return True
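# Note: despite its name, get_primes(num) searches for a Goldbach partition of
# an even number: it returns a pair of primes (p, q) with p + q == num,
# choosing the pair whose first element is the smallest such prime.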
def get_primes(num):
limit = (num // 2) + 1
candidates = list()
primes = list()
for i in range(2, limit):
if is_prime(i, primes):
primes.append(i)
candidates.append((i, num - i))
new_candidates = list()
for first, second in candidates[::-1]:
if is_prime(second, primes):
primes.append(second)
new_candidates.append((first, second))
return new_candidates[-1]
assert get_primes(4) == (2, 2)
assert get_primes(10) == (3, 7)
assert get_primes(100) == (3, 97)
|
135580
|
from .checks import check_unique_service_names, check_env_file_exists
from .cli import cli
def main() -> None:
try:
check_unique_service_names()
check_env_file_exists()
except RuntimeError as e:
print(e)
exit(1)
cli()
main()
|
135604
|
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from armulator.armv6.shift import shift
class Pkhbt(AbstractOpcode):
def __init__(self, tb_form, m, d, n, shift_t, shift_n):
super(Pkhbt, self).__init__()
self.tb_form = tb_form
self.m = m
self.d = d
self.n = n
self.shift_t = shift_t
self.shift_n = shift_n
def execute(self, processor):
if processor.condition_passed():
operand2 = shift(processor.registers.get(self.m), self.shift_t, self.shift_n,
processor.registers.cpsr.get_c())
temp_rd = processor.registers.get(self.n)[0:16] if self.tb_form else operand2[0:16]
temp_rd += operand2[16:32] if self.tb_form else processor.registers.get(self.n)[16:32]
processor.registers.set(self.d, temp_rd)
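# PKHBT/PKHTB pack two halfwords into Rd: with tb_form set (PKHTB) the top half
# comes from Rn and the bottom half from the shifted Rm; otherwise (PKHBT) the
# bottom half comes from Rn and the top half from the shifted Rm.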
|
135612
|
import os
import numpy as np
import argparse
import time
import torch
import torchvision
import cv2
def yolo_forward_dynamic(output, num_classes, anchors, num_anchors, scale_x_y):
# Output would be invalid if it does not satisfy this assert
# assert (output.size(1) == (5 + num_classes) * num_anchors)
# print(output.size())
# Slice the second dimension (channel) of output into:
# [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ]
# And then into
# bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ]
# batch = output.size(0)
# H = output.size(2)
# W = output.size(3)
bxy_list = []
bwh_list = []
det_confs_list = []
cls_confs_list = []
for i in range(num_anchors):
begin = i * (5 + num_classes)
end = (i + 1) * (5 + num_classes)
bxy_list.append(output[:, begin: begin + 2])
bwh_list.append(output[:, begin + 2: begin + 4])
det_confs_list.append(output[:, begin + 4: begin + 5])
cls_confs_list.append(output[:, begin + 5: end])
# Shape: [batch, num_anchors * 2, H, W]
bxy = torch.cat(bxy_list, dim=1)
# Shape: [batch, num_anchors * 2, H, W]
bwh = torch.cat(bwh_list, dim=1)
# Shape: [batch, num_anchors, H, W]
det_confs = torch.cat(det_confs_list, dim=1)
# Shape: [batch, num_anchors * H * W]
# print(output.size(0),num_anchors * output.size(2) * output.size(3))
det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3))
# Shape: [batch, num_anchors * num_classes, H, W]
cls_confs = torch.cat(cls_confs_list, dim=1)
# Shape: [batch, num_anchors, num_classes, H * W]
    # print(num_anchors, output.size(0), output.size(2), output.size(3))
cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3))
# Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3),
num_classes)
# Apply sigmoid(), exp() and softmax() to slices
    # print(bxy)
bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
bwh = torch.exp(bwh)
det_confs = torch.sigmoid(det_confs)
cls_confs = torch.sigmoid(cls_confs)
# Prepare C-x, C-y, P-w, P-h (None of them are torch related)
grid_x = np.expand_dims(np.expand_dims(
np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0),
axis=0)
grid_y = np.expand_dims(np.expand_dims(
np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0),
axis=0)
# grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1)
# grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W)
anchor_w = []
anchor_h = []
for i in range(num_anchors):
anchor_w.append(anchors[i * 2])
anchor_h.append(anchors[i * 2 + 1])
device = None
cuda_check = output.is_cuda
if cuda_check:
device = output.get_device()
bx_list = []
by_list = []
bw_list = []
bh_list = []
# Apply C-x, C-y, P-w, P-h
for i in range(num_anchors):
ii = i * 2
# Shape: [batch, 1, H, W]
bx = bxy[:, ii: ii + 1] + torch.tensor(grid_x, device=device,
dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32)
# Shape: [batch, 1, H, W]
by = bxy[:, ii + 1: ii + 2] + torch.tensor(grid_y, device=device,
dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32)
# Shape: [batch, 1, H, W]
bw = bwh[:, ii: ii + 1] * anchor_w[i]
# Shape: [batch, 1, H, W]
bh = bwh[:, ii + 1: ii + 2] * anchor_h[i]
bx_list.append(bx)
by_list.append(by)
bw_list.append(bw)
bh_list.append(bh)
########################################
# Figure out bboxes from slices #
########################################
# Shape: [batch, num_anchors, H, W]
bx = torch.cat(bx_list, dim=1)
# Shape: [batch, num_anchors, H, W]
by = torch.cat(by_list, dim=1)
# Shape: [batch, num_anchors, H, W]
bw = torch.cat(bw_list, dim=1)
# Shape: [batch, num_anchors, H, W]
bh = torch.cat(bh_list, dim=1)
# Shape: [batch, 2 * num_anchors, H, W]
bx_bw = torch.cat((bx, bw), dim=1)
# Shape: [batch, 2 * num_anchors, H, W]
by_bh = torch.cat((by, bh), dim=1)
# normalize coordinates to [0, 1]
bx_bw /= output.size(3)
by_bh /= output.size(2)
# Shape: [batch, num_anchors * H * W, 1]
bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
bx1 = bx - bw * 0.5
by1 = by - bh * 0.5
bx2 = bx1 + bw
by2 = by1 + bh
# Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4]
boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3),
1, 4)
# boxes = boxes.repeat(1, 1, num_classes, 1)
# boxes: [batch, num_anchors * H * W, 1, 4]
# cls_confs: [batch, num_anchors * H * W, num_classes]
# det_confs: [batch, num_anchors * H * W]
det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
confs = cls_confs * det_confs
# boxes: [batch, num_anchors * H * W, 1, 4]
# confs: [batch, num_anchors * H * W, num_classes]
return boxes, confs
class YoloLayer(object):
    ''' Yolo layer
    model_out: during inference, controls whether post-processing is done
    inside or outside the model (True: outside).
    '''
def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1, stride=32, model_out=False):
self.anchor_mask = anchor_mask
self.num_classes = num_classes
self.anchors = anchors
self.num_anchors = num_anchors
self.anchor_step = len(anchors) // num_anchors
self.coord_scale = 1
self.noobject_scale = 1
self.object_scale = 5
self.class_scale = 1
self.thresh = 0.6
self.stride = stride
self.seen = 0
self.scale_x_y = 1
self.model_out = model_out
def forward(self, output):
masked_anchors = []
for m in self.anchor_mask:
masked_anchors += self.anchors[m * self.anchor_step:(m + 1) * self.anchor_step]
masked_anchors = [anchor / self.stride for anchor in masked_anchors]
        # print(masked_anchors)
return yolo_forward_dynamic(output, self.num_classes, masked_anchors,
len(self.anchor_mask), scale_x_y=self.scale_x_y)
def get_region_boxes(boxes_and_confs):
# print('Getting boxes from boxes and confs ...')
boxes_list = []
confs_list = []
for item in boxes_and_confs:
boxes_list.append(item[0])
confs_list.append(item[1])
# boxes: [batch, num1 + num2 + num3, 1, 4]
# confs: [batch, num1 + num2 + num3, num_classes]
boxes = torch.cat(boxes_list, dim=1)
confs = torch.cat(confs_list, dim=1)
return [boxes, confs]
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
# print(boxes.shape)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = confs.argsort()[::-1]
keep = []
while order.size > 0:
idx_self = order[0]
idx_other = order[1:]
keep.append(idx_self)
xx1 = np.maximum(x1[idx_self], x1[idx_other])
yy1 = np.maximum(y1[idx_self], y1[idx_other])
xx2 = np.minimum(x2[idx_self], x2[idx_other])
yy2 = np.minimum(y2[idx_self], y2[idx_other])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
if min_mode:
over = inter / np.minimum(areas[order[0]], areas[order[1:]])
else:
over = inter / (areas[order[0]] + areas[order[1:]] - inter)
inds = np.where(over <= nms_thresh)[0]
order = order[inds + 1]
return np.array(keep)
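# Illustrative example of nms_cpu on toy data (shown as comments only):
#   boxes = np.array([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
#   confs = np.array([0.9, 0.8, 0.7])
#   nms_cpu(boxes, confs, nms_thresh=0.5)  # -> array([0, 2]); box 1 overlaps box 0 with IoU ~0.68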
def nms(conf_thresh, nms_thresh, output):
# anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
# num_anchors = 9
# anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# strides = [8, 16, 32]
# anchor_step = len(anchors) // num_anchors
# [batch, num, 1, 4]
box_array = output[0]
# [batch, num, num_classes]
confs = output[1]
    if not isinstance(box_array, np.ndarray):
box_array = box_array.cpu().detach().numpy()
confs = confs.cpu().detach().numpy()
num_classes = confs.shape[2]
# [batch, num, 4]
box_array = box_array[:, :, 0]
# [batch, num, num_classes] --> [batch, num]
max_conf = np.max(confs, axis=2)
max_id = np.argmax(confs, axis=2)
bboxes_batch = []
for i in range(box_array.shape[0]):
argwhere = max_conf[i] > conf_thresh
l_box_array = box_array[i, argwhere, :]
l_max_conf = max_conf[i, argwhere]
l_max_id = max_id[i, argwhere]
bboxes = []
# nms for each class
for j in range(num_classes):
cls_argwhere = l_max_id == j
ll_box_array = l_box_array[cls_argwhere, :]
ll_max_conf = l_max_conf[cls_argwhere]
ll_max_id = l_max_id[cls_argwhere]
keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
if (keep.size > 0):
ll_box_array = ll_box_array[keep, :]
ll_max_conf = ll_max_conf[keep]
ll_max_id = ll_max_id[keep]
for k in range(ll_box_array.shape[0]):
bboxes.append(
[ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3],
ll_max_conf[k], ll_max_id[k]])
bboxes_batch.append(bboxes)
return bboxes_batch
def post_process(flags):
names = np.loadtxt(flags.coco_class_names, dtype='str', delimiter='\n')
    # Read the .bin files used to generate the prediction results
bin_path = flags.bin_data_path
ori_path = flags.origin_jpg_path
anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
num_classes = 80
det_results_path = flags.det_results_path
os.makedirs(det_results_path, exist_ok=True)
total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name])
yolo1 = YoloLayer(anchor_mask=[0, 1, 2], num_classes=num_classes, anchors=anchors, num_anchors=9, stride=8)
yolo2 = YoloLayer(anchor_mask=[3, 4, 5], num_classes=num_classes, anchors=anchors, num_anchors=9, stride=16)
yolo3 = YoloLayer(anchor_mask=[6, 7, 8], num_classes=num_classes, anchors=anchors, num_anchors=9, stride=32)
yolo_shape = [[1, 255, 76, 76], [1, 255, 38, 38], [1, 255, 19, 19]]
for bin_file in sorted(total_img):
path_base = os.path.join(bin_path, bin_file)
print(path_base)
# print('\n', os.path.join(ori_path, '{}.jpg'.format(bin_file)), '\n')
src_img = cv2.imread(os.path.join(ori_path, '{}.jpg'.format(bin_file)))
assert src_img is not None, 'Image Not Found ' + bin_file
        # Load all detection output tensors
feature_map_1 = np.fromfile(path_base + "_" + '1' + ".bin", dtype="float32").reshape(yolo_shape[0])
feature_map_2 = np.fromfile(path_base + "_" + '2' + ".bin", dtype="float32").reshape(yolo_shape[1])
feature_map_3 = np.fromfile(path_base + "_" + '3' + ".bin", dtype="float32").reshape(yolo_shape[2])
pred_1 = yolo1.forward(torch.from_numpy(feature_map_1))
pred_2 = yolo2.forward(torch.from_numpy(feature_map_2))
pred_3 = yolo3.forward(torch.from_numpy(feature_map_3))
# nms
output = get_region_boxes([pred_1, pred_2, pred_3])
pred = nms(conf_thresh=0.4, nms_thresh=0.6, output=output)[0]
# save result
det_results_file = os.path.join(det_results_path, bin_file + ".txt")
print(det_results_file)
with open(det_results_file, 'w') as f:
width = src_img.shape[1]
height = src_img.shape[0]
for i in range(len(pred)):
box = pred[i]
x1 = int(box[0] * width)
y1 = int(box[1] * height)
x2 = int(box[2] * width)
y2 = int(box[3] * height)
cls_conf = box[4]
cls_id = box[5]
content = '{} {} {} {} {} {}'.format(names[int(cls_id)], cls_conf, x1, y1, x2, y2)
print(content)
f.write(content)
f.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0")
parser.add_argument("--origin_jpg_path", default="./val2014/")
parser.add_argument("--det_results_path",
default="./detection-results/")
parser.add_argument("--coco_class_names", default="./coco2014.names")
parser.add_argument("--net_out_num", default=3)
flags = parser.parse_args()
post_process(flags)
|
135625
|
from pak.datasets.Dataset import Dataset
import numpy as np
import zipfile
import tarfile
import urllib.request
import shutil
from os import makedirs, listdir
from os.path import join, isfile, isdir, exists, splitext
from scipy.ndimage import imread
from scipy.misc import imresize
from scipy.io import loadmat
from skimage.transform import resize
from pak import utils
from pak.util import mpii_human_pose as mpii_hp
import h5py
from enum import Enum
class EgoHands_config(Enum):
Polygon = 1 # polygon as in the original data
AABB = 2 # AABB for simplification
class EgoHands(Dataset):
def __init__(self, root, verbose=True):
""" ctro
"""
Dataset.__init__(self, "egohands_data", root, verbose)
url = 'http://vision.soic.indiana.edu/egohands_files/egohands_data.zip'
self.root_export = join(root, "egohands_data")
self.download_and_unzip(url)
def get_raw(self, config=EgoHands_config.Polygon, memmapped=False):
"""
"""
# step 1: get all videos
labelled_samples_url = join(self.root_export, '_LABELLED_SAMPLES')
all_videos = [join(labelled_samples_url, f) for f in \
listdir(labelled_samples_url) if isdir(join(labelled_samples_url, f))]
# step 2: load video frames and polygon dataset
Y = []
if memmapped:
X_shape = (48, 100, 720, 1280, 3)
fmmap = join(self.root_export, 'egohands.memmap')
fmmap_exists = isfile(fmmap)
if not fmmap_exists:
X = np.memmap(fmmap, dtype='uint8', mode='w+', shape=X_shape)
else:
X = []
for vindx, vurl in enumerate(all_videos):
imgs = sorted([f \
for f in listdir(vurl) if isfile(join(vurl, f)) and \
f.endswith('jpg')])
assert(len(imgs) == 100) # sanity check
if memmapped:
if not fmmap_exists:
# if we already created the memmap file we do NOT
# want to recreate it!
imgs = np.array([imread(join(vurl, f)) for f in imgs], \
'uint8')
X[vindx] = imgs
else:
imgs = np.array([imread(join(vurl, f)) for f in imgs], 'uint8')
X.append(imgs)
polygon_url = join(vurl, 'polygons.mat')
M = loadmat(polygon_url)['polygons'][0]
Y_single_video = []
for i in range(100):
V = M[i]
Y_single_frame = []
for hand in range(4):
H = V[hand]
#if len(H) > 0:
if config is EgoHands_config.Polygon:
Y_single_frame.append(H)
                    elif len(H) > 1:  # an empty H means the hand is not visible
x = H[:,0]
y = H[:,1]
top_right = (np.max(x), np.max(y))
bottom_left = (np.min(x), np.min(y))
Y_single_frame.append((top_right, bottom_left))
Y_single_video.append(Y_single_frame)
Y.append(Y_single_video)
        # step 3: read metadata
#M = loadmat(join(self.root_export, 'metadata.mat'))
#
# M = loadmat(join(self.root_export, '_LABELLED_SAMPLES/CARDS_COURTYARD_B_T/polygons.mat'))
#
# X = imread(join(labelled_samples_url, 'CARDS_COURTYARD_B_T/frame_0011.jpg'))
if memmapped:
if not fmmap_exists:
#del X # flush the file
utils.talk('flush memmap to file', self.verbose)
X.flush() # write memmap to files
del X
X = np.memmap(fmmap, dtype='uint8', mode='r', shape=X_shape)
else:
X = np.array(X, 'uint8')
return X, np.array(Y)
|
135631
|
class Graph:
def __init__(self, vertices):
self.V = vertices
self.graph = []
def addEdge(self, u, v, w):
self.graph.append([u, v, w])
def find(self, parent, i):
if parent[i] == i:
return i
return self.find(parent, parent[i])
def union(self, parent, rank, x, y):
xroot = self.find(parent, x)
yroot = self.find(parent, y)
if rank[xroot] < rank[yroot]:
parent[xroot] = yroot
elif rank[xroot] > rank[yroot]:
parent[yroot] = xroot
else:
parent[yroot] = xroot
rank[xroot] += 1
def KruskalMST(self):
result = []
i = 0
e = 0
self.graph = sorted(self.graph,
key=lambda item: item[2])
parent = []
rank = []
for node in range(self.V):
parent.append(node)
rank.append(0)
while e < self.V - 1:
u, v, w = self.graph[i]
i = i + 1
x = self.find(parent, u)
y = self.find(parent, v)
if x != y:
e = e + 1
result.append([u, v, w])
self.union(parent, rank, x, y)
minimumCost = 0
print ("Edges in the constructed MST")
for u, v, weight in result:
minimumCost += weight
print("%d -- %d == %d" % (u, v, weight))
print("Minimum Spanning Tree" , minimumCost)
g = Graph(4)
g.addEdge(0, 1, 10)
g.addEdge(0, 2, 6)
g.addEdge(0, 3, 5)
g.addEdge(1, 3, 15)
g.addEdge(2, 3, 4)
# Function call
g.KruskalMST()
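# Expected output for the example graph above:
#   Edges in the constructed MST
#   2 -- 3 == 4
#   0 -- 3 == 5
#   0 -- 1 == 10
#   Minimum Spanning Tree 19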
|
135678
|
from itty import *
from tropo import Tropo, Result, MachineDetection
@post('/index.json')
def index(request):
t = Tropo()
    mc = MachineDetection(introduction="This is a test. Please hold while I determine if you are a Machine or Human. Processing. Finished. Thank you for your patience.", voice="Victor").json
t.call(to="+14071234321", machineDetection=mc)
t.on(event="continue", next="/continue.json")
return t.RenderJson()
@post("/continue.json")
def continue_call(request):
r = Result(request.body)
t = Tropo()
userType = r.getUserType()
t.say("You are a " + userType)
return t.RenderJson()
run_itty(server='wsgiref', host='0.0.0.0', port=8888)
|
135686
|
import json
import random
from datetime import datetime, timedelta
from flask import current_app
from notifications_utils.s3 import s3upload
from requests import HTTPError, request
from app import notify_celery
from app.aws.s3 import file_exists
from app.celery.process_ses_receipts_tasks import process_ses_results
from app.config import QueueNames
from app.models import SMS_TYPE
temp_fail = "7700900003"
perm_fail = "7700900002"
delivered = "7700900001"
delivered_email = "<EMAIL>"
perm_fail_email = "<EMAIL>"
temp_fail_email = "<EMAIL>"
def send_sms_response(provider, reference, to):
if provider == "mmg":
body = mmg_callback(reference, to)
headers = {"Content-type": "application/json"}
else:
headers = {"Content-type": "application/x-www-form-urlencoded"}
body = firetext_callback(reference, to)
# to simulate getting a temporary_failure from firetext
# we need to send a pending status updated then a permanent-failure
if body['status'] == '2': # pending status
make_request(SMS_TYPE, provider, body, headers)
# 1 is a declined status for firetext, will result in a temp-failure
body = {'mobile': to,
'status': "1",
'time': '2016-03-10 14:17:00',
'reference': reference
}
make_request(SMS_TYPE, provider, body, headers)
def send_email_response(reference, to):
if to == perm_fail_email:
body = ses_hard_bounce_callback(reference)
elif to == temp_fail_email:
body = ses_soft_bounce_callback(reference)
else:
body = ses_notification_callback(reference)
process_ses_results.apply_async([body], queue=QueueNames.RESEARCH_MODE)
def make_request(notification_type, provider, data, headers):
api_call = "{}/notifications/{}/{}".format(current_app.config["API_HOST_NAME"], notification_type, provider)
try:
response = request(
"POST",
api_call,
headers=headers,
data=data,
timeout=60
)
response.raise_for_status()
except HTTPError as e:
current_app.logger.error(
"API POST request on {} failed with status {}".format(
api_call,
e.response.status_code
)
)
raise e
finally:
current_app.logger.info("Mocked provider callback request finished")
return response.json()
def mmg_callback(notification_id, to):
"""
status: 3 - delivered
status: 4 - expired (temp failure)
status: 5 - rejected (perm failure)
"""
if to.strip().endswith(temp_fail):
status = "4"
elif to.strip().endswith(perm_fail):
status = "5"
else:
status = "3"
return json.dumps({"reference": "mmg_reference",
"CID": str(notification_id),
"MSISDN": to,
"status": status,
"deliverytime": "2016-04-05 16:01:07"})
def firetext_callback(notification_id, to):
"""
status: 0 - delivered
status: 1 - perm failure
"""
if to.strip().endswith(perm_fail):
status = "1"
elif to.strip().endswith(temp_fail):
status = "2"
else:
status = "0"
return {
'mobile': to,
'status': status,
'time': '2016-03-10 14:17:00',
'reference': notification_id
}
@notify_celery.task(bind=True, name="create-fake-letter-response-file", max_retries=5, default_retry_delay=300)
def create_fake_letter_response_file(self, reference):
now = datetime.utcnow()
dvla_response_data = '{}|Sent|0|Sorted'.format(reference)
# try and find a filename that hasn't been taken yet - from a random time within the last 30 seconds
for i in sorted(range(30), key=lambda _: random.random()):
upload_file_name = 'NOTIFY-{}-RSP.TXT'.format((now - timedelta(seconds=i)).strftime('%Y%m%d%H%M%S'))
if not file_exists(current_app.config['DVLA_RESPONSE_BUCKET_NAME'], upload_file_name):
break
else:
raise ValueError(
            "can't create fake letter response file for {} - too many files for that time already exist on s3".format(
reference
)
)
s3upload(
filedata=dvla_response_data,
region=current_app.config['AWS_REGION'],
bucket_name=current_app.config['DVLA_RESPONSE_BUCKET_NAME'],
file_location=upload_file_name
)
current_app.logger.info("Fake DVLA response file {}, content [{}], uploaded to {}, created at {}".format(
upload_file_name, dvla_response_data, current_app.config['DVLA_RESPONSE_BUCKET_NAME'], now))
# on development we can't trigger SNS callbacks so we need to manually hit the DVLA callback endpoint
if current_app.config['NOTIFY_ENVIRONMENT'] == 'development':
make_request('letter', 'dvla', _fake_sns_s3_callback(upload_file_name), None)
def _fake_sns_s3_callback(filename):
message_contents = '{"Records":[{"s3":{"object":{"key":"%s"}}}]}' % (filename) # noqa
return json.dumps({
"Type": "Notification",
"MessageId": "some-message-id",
"Message": message_contents
})
def ses_notification_callback(reference):
ses_message_body = {
'delivery': {
'processingTimeMillis': 2003,
'recipients': ['<EMAIL>'],
'remoteMtaIp': '123.123.123.123',
'reportingMTA': 'a7-32.smtp-out.eu-west-1.amazonses.com',
'smtpResponse': '250 2.6.0 Message received',
'timestamp': '2017-11-17T12:14:03.646Z'
},
'mail': {
'commonHeaders': {
'from': ['TEST <<EMAIL>>'],
'subject': 'lambda test',
'to': ['<EMAIL>']
},
'destination': ['<EMAIL>'],
'headers': [
{
'name': 'From',
'value': 'TEST <<EMAIL>>'
},
{
'name': 'To',
'value': '<EMAIL>'
},
{
'name': 'Subject',
'value': 'lambda test'
},
{
'name': 'MIME-Version',
'value': '1.0'
},
{
'name': 'Content-Type',
'value': 'multipart/alternative; boundary="----=_Part_617203_1627511946.1510920841645"'
}
],
'headersTruncated': False,
'messageId': reference,
'sendingAccountId': '12341234',
'source': '"TEST" <<EMAIL>>',
'sourceArn': 'arn:aws:ses:eu-west-1:12341234:identity/notify.works',
'sourceIp': '0.0.0.1',
'timestamp': '2017-11-17T12:14:01.643Z'
},
'notificationType': 'Delivery'
}
return {
'Type': 'Notification',
'MessageId': '8e83c020-1234-1234-1234-92a8ee9baa0a',
'TopicArn': 'arn:aws:sns:eu-west-1:12341234:ses_notifications',
'Subject': None,
'Message': json.dumps(ses_message_body),
'Timestamp': '2017-11-17T12:14:03.710Z',
'SignatureVersion': '1',
'Signature': '[REDACTED]',
'SigningCertUrl': 'https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-[REDACTED].pem',
        'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=[REDACTED]',
'MessageAttributes': {}
}
def ses_hard_bounce_callback(reference):
return _ses_bounce_callback(reference, 'Permanent')
def ses_soft_bounce_callback(reference):
return _ses_bounce_callback(reference, 'Temporary')
def _ses_bounce_callback(reference, bounce_type):
ses_message_body = {
'bounce': {
'bounceSubType': 'General',
'bounceType': bounce_type,
'bouncedRecipients': [{
'action': 'failed',
'diagnosticCode': 'smtp; 550 5.1.1 user unknown',
'emailAddress': '<EMAIL>',
'status': '5.1.1'
}],
'feedbackId': '0102015fc9e676fb-12341234-1234-1234-1234-9301e86a4fa8-000000',
'remoteMtaIp': '192.168.127.12',
'reportingMTA': 'dsn; a7-31.smtp-out.eu-west-1.amazonses.com',
'timestamp': '2017-11-17T12:14:05.131Z'
},
'mail': {
'commonHeaders': {
'from': ['TEST <<EMAIL>>'],
'subject': 'ses callback test',
'to': ['<EMAIL>']
},
'destination': ['<EMAIL>'],
'headers': [
{
'name': 'From',
'value': 'TEST <<EMAIL>>'
},
{
'name': 'To',
'value': '<EMAIL>'
},
{
'name': 'Subject',
'value': 'lambda test'
},
{
'name': 'MIME-Version',
'value': '1.0'
},
{
'name': 'Content-Type',
'value': 'multipart/alternative; boundary="----=_Part_596529_2039165601.1510920843367"'
}
],
'headersTruncated': False,
'messageId': reference,
'sendingAccountId': '12341234',
'source': '"TEST" <<EMAIL>>',
'sourceArn': 'arn:aws:ses:eu-west-1:12341234:identity/notify.works',
'sourceIp': '0.0.0.1',
'timestamp': '2017-11-17T12:14:03.000Z'
},
'notificationType': 'Bounce'
}
return {
'Type': 'Notification',
'MessageId': '36e67c28-1234-1234-1234-2ea0172aa4a7',
'TopicArn': 'arn:aws:sns:eu-west-1:12341234:ses_notifications',
'Subject': None,
'Message': json.dumps(ses_message_body),
'Timestamp': '2017-11-17T12:14:05.149Z',
'SignatureVersion': '1',
'Signature': '[REDACTED]', # noqa
        'SigningCertUrl': 'https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-[REDACTED].pem',
        'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=[REDACTED]',
'MessageAttributes': {}
}
|
135706
|
with open("label", encoding='utf-8') as f:
    label_data = f.readlines()
label_data = [x.strip() for x in label_data]
print(len(label_data))
label_kinds = set(label_data)
print(label_kinds)
|
135787
|
import _curses
_capability_names = {_curses.KEY_A1: 'ka1', _curses.KEY_A3: 'ka3', _curses.
KEY_B2: 'kb2', _curses.KEY_BACKSPACE: 'kbs', _curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt', _curses.KEY_C1: 'kc1', _curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan', _curses.KEY_CATAB: 'ktbc', _curses.
KEY_CLEAR: 'kclr', _curses.KEY_CLOSE: 'kclo', _curses.KEY_COMMAND:
'kcmd', _curses.KEY_COPY: 'kcpy', _curses.KEY_CREATE: 'kcrt', _curses.
KEY_CTAB: 'kctab', _curses.KEY_DC: 'kdch1', _curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1', _curses.KEY_EIC: 'krmir', _curses.KEY_END:
'kend', _curses.KEY_ENTER: 'kent', _curses.KEY_EOL: 'kel', _curses.
KEY_EOS: 'ked', _curses.KEY_EXIT: 'kext', _curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1', _curses.KEY_F10: 'kf10', _curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12', _curses.KEY_F13: 'kf13', _curses.KEY_F14:
'kf14', _curses.KEY_F15: 'kf15', _curses.KEY_F16: 'kf16', _curses.
KEY_F17: 'kf17', _curses.KEY_F18: 'kf18', _curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2', _curses.KEY_F20: 'kf20', _curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22', _curses.KEY_F23: 'kf23', _curses.KEY_F24:
'kf24', _curses.KEY_F25: 'kf25', _curses.KEY_F26: 'kf26', _curses.
KEY_F27: 'kf27', _curses.KEY_F28: 'kf28', _curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3', _curses.KEY_F30: 'kf30', _curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32', _curses.KEY_F33: 'kf33', _curses.KEY_F34:
'kf34', _curses.KEY_F35: 'kf35', _curses.KEY_F36: 'kf36', _curses.
KEY_F37: 'kf37', _curses.KEY_F38: 'kf38', _curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4', _curses.KEY_F40: 'kf40', _curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42', _curses.KEY_F43: 'kf43', _curses.KEY_F44:
'kf44', _curses.KEY_F45: 'kf45', _curses.KEY_F46: 'kf46', _curses.
KEY_F47: 'kf47', _curses.KEY_F48: 'kf48', _curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5', _curses.KEY_F50: 'kf50', _curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52', _curses.KEY_F53: 'kf53', _curses.KEY_F54:
'kf54', _curses.KEY_F55: 'kf55', _curses.KEY_F56: 'kf56', _curses.
KEY_F57: 'kf57', _curses.KEY_F58: 'kf58', _curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6', _curses.KEY_F60: 'kf60', _curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62', _curses.KEY_F63: 'kf63', _curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8', _curses.KEY_F9: 'kf9', _curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp', _curses.KEY_HOME: 'khome', _curses.KEY_IC:
'kich1', _curses.KEY_IL: 'kil1', _curses.KEY_LEFT: 'kcub1', _curses.
KEY_LL: 'kll', _curses.KEY_MARK: 'kmrk', _curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov', _curses.KEY_NEXT: 'knxt', _curses.KEY_NPAGE:
'knp', _curses.KEY_OPEN: 'kopn', _curses.KEY_OPTIONS: 'kopt', _curses.
KEY_PPAGE: 'kpp', _curses.KEY_PREVIOUS: 'kprv', _curses.KEY_PRINT:
'kprt', _curses.KEY_REDO: 'krdo', _curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr', _curses.KEY_REPLACE: 'krpl', _curses.
KEY_RESTART: 'krst', _curses.KEY_RESUME: 'kres', _curses.KEY_RIGHT:
'kcuf1', _curses.KEY_SAVE: 'ksav', _curses.KEY_SBEG: 'kBEG', _curses.
KEY_SCANCEL: 'kCAN', _curses.KEY_SCOMMAND: 'kCMD', _curses.KEY_SCOPY:
'kCPY', _curses.KEY_SCREATE: 'kCRT', _curses.KEY_SDC: 'kDC', _curses.
KEY_SDL: 'kDL', _curses.KEY_SELECT: 'kslt', _curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL', _curses.KEY_SEXIT: 'kEXT', _curses.KEY_SF:
'kind', _curses.KEY_SFIND: 'kFND', _curses.KEY_SHELP: 'kHLP', _curses.
KEY_SHOME: 'kHOM', _curses.KEY_SIC: 'kIC', _curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG', _curses.KEY_SMOVE: 'kMOV', _curses.
KEY_SNEXT: 'kNXT', _curses.KEY_SOPTIONS: 'kOPT', _curses.KEY_SPREVIOUS:
'kPRV', _curses.KEY_SPRINT: 'kPRT', _curses.KEY_SR: 'kri', _curses.
KEY_SREDO: 'kRDO', _curses.KEY_SREPLACE: 'kRPL', _curses.KEY_SRIGHT:
'kRIT', _curses.KEY_SRSUME: 'kRES', _curses.KEY_SSAVE: 'kSAV', _curses.
KEY_SSUSPEND: 'kSPD', _curses.KEY_STAB: 'khts', _curses.KEY_SUNDO:
'kUND', _curses.KEY_SUSPEND: 'kspd', _curses.KEY_UNDO: 'kund', _curses.
KEY_UP: 'kcuu1'}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
    return bool(_curses.tigetstr(capability_name))
if __name__ == '__main__':
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append('Mismatch for key %s, system=%i, Python=%i' % (
_curses.keyname(key), system, python))
finally:
_curses.endwin()
for i in L:
print(i)
|
135804
|
from typing import Dict, List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.constants import pi
from kornia.filters import GaussianBlur2d, SpatialGradient
from kornia.geometry.conversions import cart2pol
from kornia.utils import create_meshgrid
# Precomputed coefficients for Von Mises kernel, given N and K(appa).
sqrt2: float = 1.4142135623730951
COEFFS_N1_K1: List[float] = [0.38214156, 0.48090413]
COEFFS_N2_K8: List[float] = [0.14343168, 0.268285, 0.21979234]
COEFFS_N3_K8: List[float] = [0.14343168, 0.268285, 0.21979234, 0.15838885]
COEFFS: Dict[str, List[float]] = {'xy': COEFFS_N1_K1, 'rhophi': COEFFS_N2_K8, 'theta': COEFFS_N3_K8}
urls: Dict[str, str] = {
k: f'https://github.com/manyids2/mkd_pytorch/raw/master/mkd_pytorch/mkd-{k}-64.pth'
for k in ['cart', 'polar', 'concat']
}
def get_grid_dict(patch_size: int = 32) -> Dict[str, torch.Tensor]:
r"""Gets cartesian and polar parametrizations of grid."""
kgrid = create_meshgrid(height=patch_size, width=patch_size, normalized_coordinates=True)
x = kgrid[0, :, :, 0]
y = kgrid[0, :, :, 1]
rho, phi = cart2pol(x, y)
grid_dict = {'x': x, 'y': y, 'rho': rho, 'phi': phi}
return grid_dict
def get_kron_order(d1: int, d2: int) -> torch.Tensor:
r"""Gets order for doing kronecker product."""
kron_order = torch.zeros([d1 * d2, 2], dtype=torch.int64)
for i in range(d1):
for j in range(d2):
kron_order[i * d2 + j, 0] = i
kron_order[i * d2 + j, 1] = j
return kron_order
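# For example, get_kron_order(2, 3) yields the index pairs
# [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]],
# i.e. every combination of the two dimensions in row-major order.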
class MKDGradients(nn.Module):
r"""Module, which computes gradients of given patches, stacked as [magnitudes, orientations].
Given gradients $g_x$, $g_y$ with respect to $x$, $y$ respectively,
    - $\text{mags} = \sqrt{g_x^2 + g_y^2 + \text{eps}}$
    - $\text{oris} = \tan^{-1}(g_y / g_x)$.
Args:
patch_size: Input patch size in pixels.
Returns:
gradients of given patches.
Shape:
- Input: (B, 1, patch_size, patch_size)
- Output: (B, 2, patch_size, patch_size)
Example:
>>> patches = torch.rand(23, 1, 32, 32)
>>> gradient = MKDGradients()
>>> g = gradient(patches) # 23x2x32x32
"""
def __init__(self) -> None:
super().__init__()
self.eps = 1e-8
self.grad = SpatialGradient(mode='diff', order=1, normalized=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(x)))
if not len(x.shape) == 4:
raise ValueError("Invalid input shape, we expect Bx1xHxW. Got: {}".format(x.shape))
# Modify 'diff' gradient. Before we had lambda function, but it is not jittable
grads_xy = -self.grad(x)
gx = grads_xy[:, :, 0, :, :]
gy = grads_xy[:, :, 1, :, :]
y = torch.cat(cart2pol(gx, gy, self.eps), dim=1)
return y
def __repr__(self) -> str:
return self.__class__.__name__
class VonMisesKernel(nn.Module):
r"""Module, which computes parameters of Von Mises kernel given coefficients, and embeds given patches.
Args:
patch_size: Input patch size in pixels.
coeffs: List of coefficients. Some examples are hardcoded in COEFFS,
Returns:
Von Mises embedding of given parametrization.
Shape:
- Input: (B, 1, patch_size, patch_size)
- Output: (B, d, patch_size, patch_size)
Examples:
>>> oris = torch.rand(23, 1, 32, 32)
>>> vm = VonMisesKernel(patch_size=32,
... coeffs=[0.14343168,
... 0.268285,
... 0.21979234])
>>> emb = vm(oris) # 23x7x32x32
"""
def __init__(self, patch_size: int, coeffs: Union[list, tuple]) -> None:
super().__init__()
self.patch_size = patch_size
b_coeffs: torch.Tensor = torch.tensor(coeffs)
self.register_buffer('coeffs', b_coeffs)
# Compute parameters.
n: int = len(coeffs) - 1
self.n: int = n
self.d: int = 2 * n + 1
# Precompute helper variables.
emb0 = torch.ones([1, 1, patch_size, patch_size])
frange = torch.arange(n) + 1
frange = frange.reshape(-1, 1, 1)
weights = torch.zeros([2 * n + 1])
weights[: n + 1] = torch.sqrt(b_coeffs)
weights[n + 1 :] = torch.sqrt(b_coeffs[1:])
weights = weights.reshape(-1, 1, 1)
self.register_buffer('emb0', emb0)
self.register_buffer('frange', frange)
self.register_buffer('weights', weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(x)))
if not len(x.shape) == 4 or x.shape[1] != 1:
raise ValueError("Invalid input shape, we expect Bx1xHxW. Got: {}".format(x.shape))
# TODO: unify the two lines below when pytorch 1.6 support is dropped
emb0: torch.Tensor = torch.jit.annotate(torch.Tensor, self.emb0)
emb0 = emb0.to(x).repeat(x.size(0), 1, 1, 1)
frange = self.frange.to(x) * x
emb1 = torch.cos(frange)
emb2 = torch.sin(frange)
embedding = torch.cat([emb0, emb1, emb2], dim=1)
embedding = self.weights * embedding
return embedding
def __repr__(self) -> str:
return (
self.__class__.__name__
+ '('
+ 'patch_size='
+ str(self.patch_size)
+ ', '
+ 'n='
+ str(self.n)
+ ', '
+ 'd='
+ str(self.d)
+ ', '
+ 'coeffs='
+ str(self.coeffs)
+ ')'
)
class EmbedGradients(nn.Module):
r"""Module that computes gradient embedding, weighted by sqrt of magnitudes of given patches.
Args:
patch_size: Input patch size in pixels.
relative: absolute or relative gradients.
Returns:
Gradient embedding.
Shape:
- Input: (B, 2, patch_size, patch_size)
- Output: (B, 7, patch_size, patch_size)
Examples:
>>> grads = torch.rand(23, 2, 32, 32)
>>> emb_grads = EmbedGradients(patch_size=32,
... relative=False)
>>> emb = emb_grads(grads) # 23x7x32x32
"""
def __init__(self, patch_size: int = 32, relative: bool = False) -> None:
super().__init__()
self.patch_size = patch_size
self.relative = relative
self.eps = 1e-8
# Theta kernel for gradients.
self.kernel = VonMisesKernel(patch_size=patch_size, coeffs=COEFFS['theta'])
# Relative gradients.
kgrid = create_meshgrid(height=patch_size, width=patch_size, normalized_coordinates=True)
_, phi = cart2pol(kgrid[:, :, :, 0], kgrid[:, :, :, 1])
self.register_buffer('phi', phi)
def emb_mags(self, mags: torch.Tensor) -> torch.Tensor:
"""Embed square roots of magnitudes with eps for numerical reasons."""
mags = torch.sqrt(mags + self.eps)
return mags
def forward(self, grads: torch.Tensor) -> torch.Tensor:
if not isinstance(grads, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(grads)))
if not len(grads.shape) == 4:
raise ValueError("Invalid input shape, we expect Bx2xHxW. Got: {}".format(grads.shape))
mags = grads[:, :1, :, :]
oris = grads[:, 1:, :, :]
if self.relative:
oris = oris - self.phi.to(oris)
y = self.kernel(oris) * self.emb_mags(mags)
return y
def __repr__(self) -> str:
return (
self.__class__.__name__
+ '('
+ 'patch_size='
+ str(self.patch_size)
+ ', '
+ 'relative='
+ str(self.relative)
+ ')'
)
def spatial_kernel_embedding(kernel_type, grids: dict) -> torch.Tensor:
r"""Compute embeddings for cartesian and polar parametrizations."""
factors = {"phi": 1.0, "rho": pi / sqrt2, "x": pi / 2, "y": pi / 2}
if kernel_type == 'cart':
coeffs_ = 'xy'
params_ = ['x', 'y']
elif kernel_type == 'polar':
coeffs_ = 'rhophi'
params_ = ['phi', 'rho']
# Infer patch_size.
keys = list(grids.keys())
patch_size = grids[keys[0]].shape[-1]
# Scale appropriately.
grids_normed = {k: v * factors[k] for k, v in grids.items()}
grids_normed = {k: v.unsqueeze(0).unsqueeze(0).float() for k, v in grids_normed.items()}
# x,y/rho,phi kernels.
vm_a = VonMisesKernel(patch_size=patch_size, coeffs=COEFFS[coeffs_])
vm_b = VonMisesKernel(patch_size=patch_size, coeffs=COEFFS[coeffs_])
emb_a = vm_a(grids_normed[params_[0]]).squeeze()
emb_b = vm_b(grids_normed[params_[1]]).squeeze()
# Final precomputed position embedding.
kron_order = get_kron_order(vm_a.d, vm_b.d)
spatial_kernel = emb_a.index_select(0, kron_order[:, 0]) * emb_b.index_select(0, kron_order[:, 1])
return spatial_kernel
class ExplicitSpacialEncoding(nn.Module):
r"""Module that computes explicit cartesian or polar embedding.
Args:
kernel_type: Parametrization of kernel ``'polar'`` or ``'cart'``.
fmap_size: Input feature map size in pixels.
in_dims: Dimensionality of input feature map.
do_gmask: Apply gaussian mask.
do_l2: Apply l2-normalization.
Returns:
Explicit cartesian or polar embedding.
Shape:
- Input: (B, in_dims, fmap_size, fmap_size)
- Output: (B, out_dims, fmap_size, fmap_size)
Example:
>>> emb_ori = torch.rand(23, 7, 32, 32)
>>> ese = ExplicitSpacialEncoding(kernel_type='polar',
... fmap_size=32,
... in_dims=7,
... do_gmask=True,
... do_l2=True)
>>> desc = ese(emb_ori) # 23x175x32x32
"""
def __init__(
self,
kernel_type: str = 'polar',
fmap_size: int = 32,
in_dims: int = 7,
do_gmask: bool = True,
do_l2: bool = True,
) -> None:
super().__init__()
if kernel_type not in ['polar', 'cart']:
raise NotImplementedError(f'{kernel_type} is not valid, use polar or cart).')
self.kernel_type = kernel_type
self.fmap_size = fmap_size
self.in_dims = in_dims
self.do_gmask = do_gmask
self.do_l2 = do_l2
self.grid = get_grid_dict(fmap_size)
self.gmask = None
# Precompute embedding.
emb = spatial_kernel_embedding(self.kernel_type, self.grid)
# Gaussian mask.
if self.do_gmask:
self.gmask = self.get_gmask(sigma=1.0)
emb = emb * self.gmask
# Store precomputed embedding.
self.register_buffer('emb', emb.unsqueeze(0))
self.d_emb: int = emb.shape[0]
self.out_dims: int = self.in_dims * self.d_emb
self.odims: int = self.out_dims
# Store kronecker form.
emb2, idx1 = self.init_kron()
self.register_buffer('emb2', emb2)
self.register_buffer('idx1', idx1)
def get_gmask(self, sigma: float) -> torch.Tensor:
"""Compute Gaussian mask."""
norm_rho = self.grid['rho'] / self.grid['rho'].max()
gmask = torch.exp(-1 * norm_rho ** 2 / sigma ** 2)
return gmask
def init_kron(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Initialize helper variables to calculate kronecker."""
kron = get_kron_order(self.in_dims, self.d_emb)
_emb = torch.jit.annotate(torch.Tensor, self.emb)
emb2 = torch.index_select(_emb, 1, kron[:, 1])
return emb2, kron[:, 0]
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(x)))
        if len(x.shape) != 4 or x.shape[1] != self.in_dims:
            raise ValueError("Invalid input shape, we expect Bx{}xHxW. Got: {}".format(self.in_dims, x.shape))
idx1 = torch.jit.annotate(torch.Tensor, self.idx1)
emb1 = torch.index_select(x, 1, idx1)
output = emb1 * self.emb2
output = output.sum(dim=(2, 3))
if self.do_l2:
output = F.normalize(output, dim=1)
return output
def __repr__(self) -> str:
return (
self.__class__.__name__
+ '('
+ 'kernel_type='
+ str(self.kernel_type)
+ ', '
+ 'fmap_size='
+ str(self.fmap_size)
+ ', '
+ 'in_dims='
+ str(self.in_dims)
+ ', '
+ 'out_dims='
+ str(self.out_dims)
+ ', '
+ 'do_gmask='
+ str(self.do_gmask)
+ ', '
+ 'do_l2='
+ str(self.do_l2)
+ ')'
)
class Whitening(nn.Module):
r"""Module, performs supervised or unsupervised whitening.
This is based on the paper "Understanding and Improving Kernel Local Descriptors".
See :cite:`mukundan2019understanding` for more details.
Args:
xform: Variant of whitening to use. None, 'lw', 'pca', 'pcaws', 'pcawt'.
whitening_model: Dictionary with keys 'mean', 'eigvecs', 'eigvals' holding torch.Tensors.
in_dims: Dimensionality of input descriptors.
output_dims: (int) Dimensionality reduction.
keval: Shrinkage parameter.
t: Attenuation parameter.
Returns:
l2-normalized, whitened descriptors.
Shape:
- Input: (B, in_dims, fmap_size, fmap_size)
- Output: (B, out_dims, fmap_size, fmap_size)
Examples:
>>> descs = torch.rand(23, 238)
>>> whitening_model = {'pca': {'mean': torch.zeros(238),
... 'eigvecs': torch.eye(238),
... 'eigvals': torch.ones(238)}}
>>> whitening = Whitening(xform='pcawt',
... whitening_model=whitening_model,
... in_dims=238,
... output_dims=128,
... keval=40,
... t=0.7)
>>> wdescs = whitening(descs) # 23x128
"""
def __init__(
self,
xform: str,
whitening_model: Union[Dict[str, Dict[str, torch.Tensor]], None],
in_dims: int,
output_dims: int = 128,
keval: int = 40,
t: float = 0.7,
) -> None:
super().__init__()
self.xform = xform
self.in_dims = in_dims
self.keval = keval
self.t = t
self.pval = 1.0
# Compute true output_dims.
output_dims = min(output_dims, in_dims)
self.output_dims = output_dims
# Initialize identity transform.
self.mean = nn.Parameter(torch.zeros(in_dims), requires_grad=True)
self.evecs = nn.Parameter(torch.eye(in_dims)[:, :output_dims], requires_grad=True)
self.evals = nn.Parameter(torch.ones(in_dims)[:output_dims], requires_grad=True)
if whitening_model is not None:
self.load_whitening_parameters(whitening_model)
def load_whitening_parameters(self, whitening_model: Dict[str, Dict[str, torch.Tensor]]) -> None:
algo = 'lw' if self.xform == 'lw' else 'pca'
wh_model = whitening_model[algo]
self.mean.data = wh_model['mean']
self.evecs.data = wh_model['eigvecs'][:, : self.output_dims]
self.evals.data = wh_model['eigvals'][: self.output_dims]
modifications = {
'pca': self._modify_pca,
'lw': self._modify_lw,
'pcaws': self._modify_pcaws,
'pcawt': self._modify_pcawt,
}
# Call modification.
modifications[self.xform]()
def _modify_pca(self) -> None:
"""Modify powerlaw parameter."""
self.pval = 0.5
def _modify_lw(self) -> None:
"""No modification required."""
def _modify_pcaws(self) -> None:
"""Shrinkage for eigenvalues."""
alpha = self.evals[self.keval]
evals = ((1 - alpha) * self.evals) + alpha
self.evecs.data = self.evecs @ torch.diag(torch.pow(evals, -0.5))
def _modify_pcawt(self) -> None:
"""Attenuation for eigenvalues."""
m = -0.5 * self.t
self.evecs.data = self.evecs @ torch.diag(torch.pow(self.evals, m))
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(x)))
if not len(x.shape) == 2:
raise ValueError("Invalid input shape, we expect NxD. Got: {}".format(x.shape))
x = x - self.mean # Center the data.
x = x @ self.evecs # Apply rotation and/or scaling.
x = torch.sign(x) * torch.pow(torch.abs(x), self.pval) # Powerlaw.
return F.normalize(x, dim=1)
def __repr__(self) -> str:
return (
self.__class__.__name__
+ '('
+ 'xform='
+ str(self.xform)
+ ', '
+ 'in_dims='
+ str(self.in_dims)
+ ', '
+ 'output_dims='
+ str(self.output_dims)
+ ')'
)
class MKDDescriptor(nn.Module):
r"""Module that computes Multiple Kernel local descriptors.
This is based on the paper "Understanding and Improving Kernel Local Descriptors".
See :cite:`mukundan2019understanding` for more details.
Args:
patch_size: Input patch size in pixels.
kernel_type: Parametrization of kernel ``'concat'``, ``'cart'``, ``'polar'``.
whitening: Whitening transform to apply ``None``, ``'lw'``, ``'pca'``, ``'pcawt'``, ``'pcaws'``.
training_set: Set that model was trained on ``'liberty'``, ``'notredame'``, ``'yosemite'``.
output_dims: Dimensionality reduction.
Returns:
Explicit cartesian or polar embedding.
Shape:
- Input: :math:`(B, in_{dims}, fmap_{size}, fmap_{size})`.
- Output: :math:`(B, out_{dims}, fmap_{size}, fmap_{size})`,
Examples:
>>> patches = torch.rand(23, 1, 32, 32)
>>> mkd = MKDDescriptor(patch_size=32,
... kernel_type='concat',
... whitening='pcawt',
... training_set='liberty',
... output_dims=128)
>>> desc = mkd(patches) # 23x128
"""
def __init__(
self,
patch_size: int = 32,
kernel_type: str = 'concat',
whitening: str = 'pcawt',
training_set: str = 'liberty',
output_dims: int = 128,
) -> None:
super().__init__()
self.patch_size: int = patch_size
self.kernel_type: str = kernel_type
self.whitening: str = whitening
self.training_set: str = training_set
self.sigma = 1.4 * (patch_size / 64)
self.smoothing = GaussianBlur2d((5, 5), (self.sigma, self.sigma), 'replicate')
self.gradients = MKDGradients()
        # Annotated string constants are needed so the module stays TorchScript-compatible.
polar_s: str = 'polar'
cart_s: str = 'cart'
self.parametrizations = [polar_s, cart_s] if self.kernel_type == 'concat' else [self.kernel_type]
# Initialize cartesian/polar embedding with absolute/relative gradients.
self.odims: int = 0
relative_orientations = {polar_s: True, cart_s: False}
self.feats = {}
for parametrization in self.parametrizations:
gradient_embedding = EmbedGradients(patch_size=patch_size, relative=relative_orientations[parametrization])
spatial_encoding = ExplicitSpacialEncoding(
kernel_type=parametrization, fmap_size=patch_size, in_dims=gradient_embedding.kernel.d
)
self.feats[parametrization] = nn.Sequential(gradient_embedding, spatial_encoding)
self.odims += spatial_encoding.odims
# Compute true output_dims.
self.output_dims: int = min(output_dims, self.odims)
# Load supervised(lw)/unsupervised(pca) model trained on training_set.
if self.whitening is not None:
whitening_models = torch.hub.load_state_dict_from_url(
urls[self.kernel_type], map_location=lambda storage, loc: storage
)
whitening_model = whitening_models[training_set]
self.whitening_layer = Whitening(
whitening, whitening_model, in_dims=self.odims, output_dims=self.output_dims
)
self.odims = self.output_dims
def forward(self, patches: torch.Tensor) -> torch.Tensor:
if not isinstance(patches, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(patches)))
if not len(patches.shape) == 4:
raise ValueError("Invalid input shape, we expect Bx1xHxW. Got: {}".format(patches.shape))
# Extract gradients.
g = self.smoothing(patches)
g = self.gradients(g)
# Extract polar/cart features.
features = []
for parametrization in self.parametrizations:
self.feats[parametrization].to(g.device)
features.append(self.feats[parametrization](g))
# Concatenate.
y = torch.cat(features, dim=1)
# l2-normalize.
y = F.normalize(y, dim=1)
# Whiten descriptors.
if self.whitening is not None:
y = self.whitening_layer(y)
return y
def __repr__(self) -> str:
return (
self.__class__.__name__
+ '('
+ 'patch_size='
+ str(self.patch_size)
+ ', '
+ 'kernel_type='
+ str(self.kernel_type)
+ ', '
+ 'whitening='
+ str(self.whitening)
+ ', '
+ 'training_set='
+ str(self.training_set)
+ ', '
+ 'output_dims='
+ str(self.output_dims)
+ ')'
)
def load_whitening_model(kernel_type: str, training_set: str) -> Dict:
whitening_models = torch.hub.load_state_dict_from_url(urls[kernel_type], map_location=lambda storage, loc: storage)
whitening_model = whitening_models[training_set]
return whitening_model
class SimpleKD(nn.Module):
"""Example to write custom Kernel Descriptors."""
def __init__(
self,
patch_size: int = 32,
kernel_type: str = 'polar', # 'cart' 'polar'
whitening: str = 'pcawt', # 'lw', 'pca', 'pcaws', 'pcawt
training_set: str = 'liberty', # 'liberty', 'notredame', 'yosemite'
output_dims: int = 128,
) -> None:
super().__init__()
relative: bool = kernel_type == 'polar'
sigma: float = 1.4 * (patch_size / 64)
# Sequence of modules.
smoothing = GaussianBlur2d((5, 5), (sigma, sigma), 'replicate')
gradients = MKDGradients()
ori = EmbedGradients(patch_size=patch_size, relative=relative)
ese = ExplicitSpacialEncoding(kernel_type=kernel_type, fmap_size=patch_size, in_dims=ori.kernel.d)
wh = Whitening(
whitening, load_whitening_model(kernel_type, training_set), in_dims=ese.odims, output_dims=output_dims
)
self.features = nn.Sequential(smoothing, gradients, ori, ese, wh)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.features(x)
|
135825
|
import torch.nn as nn
from torch.nn.functional import softmax
from torch.nn import CrossEntropyLoss
from typing import List, Any, Dict, Union
from sciwing.data.line import Line
from sciwing.data.label import Label
from wasabi import Printer
from sciwing.utils.class_nursery import ClassNursery
from sciwing.data.datasets_manager import DatasetsManager
import torch
class SimpleClassifier(nn.Module, ClassNursery):
def __init__(
self,
encoder: nn.Module,
encoding_dim: int,
num_classes: int,
classification_layer_bias: bool = True,
label_namespace: str = "label",
datasets_manager: DatasetsManager = None,
device: Union[torch.device, str] = torch.device("cpu"),
):
""" SimpleClassifier is a linear classifier head on top of any encoder
Parameters
----------
encoder : nn.Module
Any encoder that takes in lines and produces a single vector
for every line.
encoding_dim : int
The encoding dimension
num_classes : int
The number of classes
classification_layer_bias : bool
            Whether to add a bias term to the classification layer.
            This is set to False only for debugging purposes.
label_namespace : str
The namespace used for labels in the dataset
datasets_manager: DatasetsManager
The datasets manager for the model
device: torch.device
The device on which the model is run
"""
super(SimpleClassifier, self).__init__()
self.encoder = encoder
self.encoding_dim = encoding_dim
self.num_classes = num_classes
self.classification_layer_bias = classification_layer_bias
self.classification_layer = nn.Linear(
self.encoding_dim, num_classes, bias=self.classification_layer_bias
)
self._loss = CrossEntropyLoss()
self.label_namespace = label_namespace
self.datasets_manager = datasets_manager
self.label_numericalizer = self.datasets_manager.namespace_to_numericalizer[
self.label_namespace
]
self.device = torch.device(device) if isinstance(device, str) else device
self.msg_printer = Printer()
def forward(
self,
lines: List[Line],
labels: List[Label] = None,
is_training: bool = False,
is_validation: bool = False,
is_test: bool = False,
) -> Dict[str, Any]:
"""
Parameters
----------
lines : List[Line]
``iter_dict`` from any dataset that will be passed on to the encoder
labels: List[Label]
A list of labels for every instance
is_training : bool
running forward on training dataset?
is_validation : bool
running forward on validation dataset?
is_test : bool
running forward on test dataset?
Returns
-------
Dict[str, Any]
logits: torch.FloatTensor
Un-normalized probabilities over all the classes
of the shape ``[batch_size, num_classes]``
normalized_probs: torch.FloatTensor
Normalized probabilities over all the classes
of the shape ``[batch_size, num_classes]``
loss: float
Loss value if this is a training forward pass
or validation loss. There will be no loss
if this is the test dataset
"""
encoding = self.encoder(lines)
# N * C
# N - batch size
# C - number of classes
logits = self.classification_layer(encoding)
# N * C
# N - batch size
# C - number of classes
# The normalized probabilities of classification
normalized_probs = softmax(logits, dim=1)
output_dict = {"logits": logits, "normalized_probs": normalized_probs}
if is_training or is_validation:
label_indices = []
for label in labels:
label_ = label.tokens[self.label_namespace]
label_ = [tok.text for tok in label_]
label_ = self.label_numericalizer.numericalize_instance(instance=label_)
label_indices.append(label_[0]) # taking only the first label here
labels_tensor = torch.tensor(
label_indices, device=self.device, dtype=torch.long
)
assert labels_tensor.ndimension() == 1, self.msg_printer.fail(
"the labels should have 1 dimension "
"your input has shape {0}".format(labels_tensor.size())
)
loss = self._loss(logits, labels_tensor)
output_dict["loss"] = loss
return output_dict
|
135831
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Game(models.Model): #Overall Game Object
name = models.CharField(max_length=200) #name of the game
start_time = models.DateTimeField() #time to start
end_time = models.DateTimeField() #time game ends
active = models.IntegerField(default=1) #is the game active
require_regcodes = models.IntegerField(default=0) #does the game require reg codes
def __unicode__(self):
return self.name
class Category(models.Model):  # categories for challenges
game = models.ForeignKey(Game) #in which game
name = models.CharField(max_length=200) #name of the category
def __unicode__(self):
return self.name
class Challenge(models.Model): #CTF challenges
game = models.ForeignKey(Game) #in which game
category = models.ForeignKey(Category) #Category Associated
name = models.CharField(max_length=200) #Name of the challenge
description = models.CharField(max_length=2000) #description , pointers etc
points = models.IntegerField(default=100) #point value for the challenge
active = models.IntegerField(default=0) #is the challenge active
key = models.CharField(max_length=200) #scoring key for the challenge
def __unicode__(self):
return self.name
class Hint(models.Model): #hints to be displayed for a given challenge
game = models.ForeignKey(Game) #in which game
challenge = models.ForeignKey(Challenge) #challenge
text = models.CharField(max_length=2000) #hint text
active = models.IntegerField(default=0) #is the hint active
def __unicode__(self):
return self.text
class RegCodes(models.Model): # valid once reg codes
code = models.CharField(max_length=200, null=True, blank=True) #codes
used = models.IntegerField(default=0) #is it used?
def __unicode__(self):
return self.code
class Competitor(models.Model):  # holds competitors (may extend auth_user)
game = models.ForeignKey(Game) #in which game
user = models.OneToOneField(User)
display_name = models.CharField(max_length=200) #name to display
affiliation = models.CharField(max_length=200, null=True, blank=True) #affiliation text to display
url = models.CharField(max_length=200, null=True, blank=True) #url
bad_keys = models.IntegerField(default=0) #how many bad keys have they submitted
points = models.IntegerField(default=0) #current point total
active = models.IntegerField(default=1) #is the competitor active (ie allowed to play, score, count in standings)
ipaddr = models.CharField(max_length=200, null=True, blank=True) #ip the competitor reged from
regcode = models.ForeignKey(RegCodes, null=True) #code the competitor used to reg
def __unicode__(self):
return self.display_name
class Solved(models.Model): #challenges solved
game = models.ForeignKey(Game) #in which game
competitor = models.ForeignKey(Competitor) #by whom
challenge = models.ForeignKey(Challenge) #which challenge
points = models.IntegerField(default=0) #how many points they got
time = models.DateTimeField() #when they did it
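# --- Illustrative usage sketch (not part of the original models) -------------
# A minimal helper showing how a key submission could be recorded with the
# models above. The helper name `record_solve` and the use of timezone.now()
# are assumptions for illustration only, not an API defined in this project.
from django.utils import timezone
def record_solve(competitor, challenge, submitted_key):
    """Record a solve if the submitted key matches, otherwise count a bad key."""
    if submitted_key != challenge.key:
        competitor.bad_keys += 1
        competitor.save()
        return False
    Solved.objects.create(game=challenge.game, competitor=competitor,
                          challenge=challenge, points=challenge.points,
                          time=timezone.now())
    competitor.points += challenge.points
    competitor.save()
    return True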
|
135833
|
from sample_factory.envs.env_registry import global_env_registry
def create_env(full_env_name, cfg=None, env_config=None):
"""
Factory function that creates environment instances.
Matches full_env_name with env family prefixes registered in the REGISTRY and calls make_env_func()
for the first match.
:param full_env_name: complete name of the environment, starting with the prefix of registered environment family,
e.g. atari_breakout, or doom_battle. Passed to make_env_func() for further processing by the specific env family
factory (see doom_utils.py or dmlab_env.py)
:param cfg: namespace with full system configuration, output of argparser (or AttrDict when loaded from JSON)
:param env_config: AttrDict with additional system information:
env_config = AttrDict(worker_index=self.worker_idx, vector_index=vector_idx, env_id=env_id)
:return: environment instance
"""
env_registry = global_env_registry()
env_registry_entry = env_registry.resolve_env_name(full_env_name)
env = env_registry_entry.make_env_func(full_env_name, cfg=cfg, env_config=env_config)
return env
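# Illustrative usage sketch (not part of the original module). The environment
# name "doom_battle" and the AttrDict construction mirror the docstring above;
# the exact cfg contents depend on the caller's argparser and are elided here.
#
#     from sample_factory.utils.utils import AttrDict
#
#     env_config = AttrDict(worker_index=0, vector_index=0, env_id=0)
#     env = create_env('doom_battle', cfg=cfg, env_config=env_config)
#     obs = env.reset()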
|
135877
|
import torch
def accuracy(preds, labels, ignore_index=None):
with torch.no_grad():
assert preds.shape[0] == len(labels)
correct = torch.sum(preds == labels)
total = torch.sum(torch.ones_like(labels))
if ignore_index is not None:
# exclude correct predictions that correspond to the ignore index
correct -= torch.sum(torch.logical_and(preds == ignore_index, preds == labels))
# exclude ignore-index positions from the accuracy denominator
total -= torch.sum(labels == ignore_index)
return correct.to(dtype=torch.float) / total.to(dtype=torch.float)
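# Minimal sanity check (illustrative, not part of the original module): with
# ignore_index=0 the padding positions (label 0) are removed from both the
# matched count and the denominator, so the second value is 2/3 instead of 3/4.
if __name__ == "__main__":
    preds = torch.tensor([0, 1, 2, 2])
    labels = torch.tensor([0, 1, 2, 3])
    print(accuracy(preds, labels))                  # tensor(0.7500)
    print(accuracy(preds, labels, ignore_index=0))  # tensor(0.6667)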
|
135900
|
from __future__ import division, print_function
from action_detector_diagnosis import ActionDetectorDiagnosis
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import pandas as pd
import os
from collections import OrderedDict
from utils import interpolated_prec_rec
from matplotlib import gridspec, rc
import matplotlib as mpl
mpl.use('Agg')
params = {'font.family': 'serif','font.serif': 'Times',
'text.usetex': True,
'xtick.major.size': 8,
'ytick.major.size': 8,
'xtick.major.width': 3,
'ytick.major.width': 3,
'mathtext.fontset': 'custom',
}
mpl.rcParams.update(params)
import matplotlib.pyplot as plt
def compute_mAP_N(result,this_cls_pred,this_cls_gt):
ap = np.zeros(len(result.tiou_thresholds))
tp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
fp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
for tidx, tiou in enumerate(result.tiou_thresholds):
fp[tidx,pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values] = 1
tp[tidx,~(pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values)] = 1
tp_cumsum = np.cumsum(tp, axis=1).astype(float)
fp_cumsum = np.cumsum(fp, axis=1).astype(float)
recall_cumsum = tp_cumsum / len(np.unique(this_cls_gt['gt-id']))
precision_cumsum = recall_cumsum * result.average_num_instance_per_class / (recall_cumsum * result.average_num_instance_per_class + fp_cumsum)
for tidx in range(len(result.tiou_thresholds)):
ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx,:], recall_cumsum[tidx,:])
return ap.mean()
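# Note on the normalized precision used above (hedged: this follows the
# normalized-mAP idea from the action-detection diagnosis literature, with the
# constant taken from result.average_num_instance_per_class): with recall R,
# average instances per class N_avg and cumulative false positives FP,
#     P_N = (R * N_avg) / (R * N_avg + FP)
# which removes the effect of classes having different numbers of ground-truth
# instances when averaging precision across classes.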
# Initialize true positive and false positive vectors.
def compute_average_mAP_N_for_characteristic(sensitivity_analysis, characteristic_name):
gt_by_characteristic = sensitivity_analysis.ground_truth.groupby(characteristic_name)
average_mAP_n_by_characteristic_value = OrderedDict()
for characteristic_value, this_characteristic_gt in gt_by_characteristic:
ap = np.nan*np.zeros(len(sensitivity_analysis.activity_index))
gt_by_cls = this_characteristic_gt.groupby('label')
pred_by_cls = sensitivity_analysis.prediction.groupby('label')
for cls in sensitivity_analysis.activity_index.values():
this_cls_pred = pred_by_cls.get_group(cls).sort_values(by='score',ascending=False)
try:
this_cls_gt = gt_by_cls.get_group(cls)
except KeyError:  # no ground truth for this class; skip it
continue
gt_id_to_keep = np.append(this_cls_gt['gt-id'].values, [np.nan])
for tidx, tiou in enumerate(sensitivity_analysis.tiou_thresholds):
this_cls_pred = this_cls_pred[this_cls_pred[sensitivity_analysis.matched_gt_id_cols[tidx]].isin(gt_id_to_keep)]
ap[cls] = compute_mAP_N(sensitivity_analysis,this_cls_pred,this_cls_gt)
average_mAP_n_by_characteristic_value[characteristic_value] = np.nanmean(ap)
return average_mAP_n_by_characteristic_value
def plot_sensitivity_analysis(sensitivity_analysis, save_filename,
colors=['#7fc97f','#beaed4','#fdc086','#386cb0','#f0027f','#bf5b17'],
characteristic_names=['context-size', 'context-distance', 'agreement','coverage', 'length', 'num-instances'],
characteristic_names_in_text=['Context Size', 'Context Distance', 'Agreement', 'Coverage', 'Length', '\# Instances'],
characteristic_names_delta_positions=[1.1,-1.4,0.25,0.5,1,-0.2],
buckets_order=['0','1','2','3','4','5','6','XW', 'W', 'XS','S', 'N', 'M', 'F', 'Inf', 'L', 'XL', 'H', 'XH'],
figsize=(25,6), fontsize=28, num_grids=4):
average_mAP_N_by_characteristic = OrderedDict()
average_mAP_N_by_characteristic['base'] = sensitivity_analysis.average_mAP
for characteristic_name in characteristic_names:
average_mAP_N_by_characteristic[characteristic_name] = compute_average_mAP_N_for_characteristic(sensitivity_analysis,
characteristic_name)
characteristic_name_lst,bucket_lst = ['base'],['base']
ratio_value_lst = [average_mAP_N_by_characteristic['base']]
for characteristic_name in characteristic_names:
characteristic_name_lst += len(average_mAP_N_by_characteristic[characteristic_name])*[characteristic_name]
bucket_lst += average_mAP_N_by_characteristic[characteristic_name].keys()
ratio_value_lst += average_mAP_N_by_characteristic[characteristic_name].values()
# characteristic-name,bucket,ratio-value
sensitivity_analysis_df = pd.DataFrame({'characteristic-name': characteristic_name_lst,
'bucket': bucket_lst,
'ratio-value': ratio_value_lst,
})
sensitivity_analysis_df['order'] = pd.Categorical(sensitivity_analysis_df['bucket'],
categories=buckets_order,ordered=True)
sensitivity_analysis_df.sort_values(by='order', inplace=True)
sensitivity_analysis_df_by_characteristic_name = sensitivity_analysis_df.groupby('characteristic-name')
base_average_mAP_N = sensitivity_analysis_df_by_characteristic_name.get_group('base')['ratio-value'].values[0]*100
fig = plt.figure(figsize=figsize)
grid = plt.GridSpec(1, num_grids)
ax1=fig.add_subplot(grid[:-1])
current_x_value = 0
xticks_lst,xvalues_lst = [],[]
for char_idx, characteristic_name in enumerate(characteristic_names):
this_sensitivity_analysis = sensitivity_analysis_df_by_characteristic_name.get_group(characteristic_name)
x_values = range(current_x_value, current_x_value + len(this_sensitivity_analysis))
y_values = this_sensitivity_analysis['ratio-value'].values*100
mybars=ax1.bar(x_values, y_values, color=colors[char_idx])
for bari in mybars:
height = bari.get_height()
ax1.text(bari.get_x() + bari.get_width()/2, bari.get_height()+0.025*100, '%.1f' % height,
ha='center', color='black', fontsize=fontsize/1.15)
ax1.annotate(characteristic_names_in_text[char_idx],
xy=(current_x_value+characteristic_names_delta_positions[char_idx],100),
fontsize=fontsize)
if char_idx < len(characteristic_names) - 1:
ax1.axvline(max(x_values)+1, linewidth=1.5, color="gray", linestyle='dotted')
current_x_value = max(x_values) + 2
xticks_lst.extend(this_sensitivity_analysis['bucket'].values.tolist())
xvalues_lst.extend(x_values)
ax1.plot([xvalues_lst[0]- 1, xvalues_lst[-1] + 1],[base_average_mAP_N, base_average_mAP_N], '--', color='k')
ax1.annotate('%.2f' % base_average_mAP_N,xy=(xvalues_lst[-1]-0.5,base_average_mAP_N+0.025*100), fontsize=fontsize/1.15)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.yaxis.grid(True, linestyle='dotted')
ax1.set_axisbelow(True)
ax1.xaxis.set_tick_params(width=0)
ax1.yaxis.set_tick_params(size=10, direction='in', width=2)
for axis in ['bottom','left']:
ax1.spines[axis].set_linewidth(2.5)
plt.xticks(xvalues_lst, xticks_lst, fontsize=fontsize/1.1)
plt.yticks(fontsize=fontsize)
ax1.set_ylabel('Average-mAP$_{N}$ $(\%)$', fontsize=fontsize)
ax1.set_ylim(0,100)
ax1.set_xlim(-1.5,current_x_value-1)
ax2=fig.add_subplot(grid[-1:])
current_x_value = 0
xticks_lst,xvalues_lst = [],[]
min_y_value,max_y_value=np.infty,-np.infty
for char_idx, characteristic_name in enumerate(characteristic_names):
this_sensitivity_analysis = sensitivity_analysis_df_by_characteristic_name.get_group(characteristic_name)
x_values = [current_x_value,current_x_value]
y_values = this_sensitivity_analysis['ratio-value'].values*100
y_values = [min(y_values)/base_average_mAP_N,max(y_values)/base_average_mAP_N]
this_min_y_value,this_max_y_value=min(y_values),max(y_values)
min_y_value,max_y_value=min(min_y_value,this_min_y_value),max(max_y_value,this_max_y_value)
ax2.plot([current_x_value,current_x_value],
[this_min_y_value,this_max_y_value], linestyle='-', marker='_', mew=5, markersize=25,lw=8,color=colors[char_idx])
for i,j in zip(x_values,y_values):
ax2.annotate('%.1f' % j,xy=(i+0.1,j+0.05), fontsize=fontsize/1.1)
current_x_value += 1
xticks_lst += [characteristic_names_in_text[char_idx]]
xvalues_lst += [x_values[0]]
ax2.plot([xvalues_lst[0]- 1, xvalues_lst[-1] + 1],[base_average_mAP_N/base_average_mAP_N, base_average_mAP_N/base_average_mAP_N], '--', color='k',zorder=0)
ax2.annotate('%.2f' % base_average_mAP_N,xy=(xvalues_lst[-1]+0.2,base_average_mAP_N/base_average_mAP_N+0.05), fontsize=fontsize/1.1)
ax2.yaxis.grid(color='gray', linestyle=':',lw=1)
ax2.xaxis.grid(color='gray', linestyle=':',lw=1)
ax2.set_axisbelow(True)
ax2.xaxis.set_tick_params(width=0)
ax2.yaxis.set_tick_params(size=10, direction='in', width=2)
for axis in ['bottom','left']:
ax2.spines[axis].set_linewidth(2.5)
plt.xticks(xvalues_lst, xticks_lst, fontsize=fontsize/1.5, rotation=90)
plt.yticks(fontsize=fontsize)
ax2.set_ylabel('Average-mAP$_{N}$\nRelative Change', fontsize=fontsize)
ax2.set_ylim(min_y_value*0.8,max_y_value*1.2)
plt.tight_layout()
fig.savefig(save_filename,bbox_inches='tight')
print('[Done] Output analysis is saved in %s' % save_filename)
def main(ground_truth_filename, subset, prediction_filename, output_folder, is_thumos14):
if not is_thumos14:
if subset == 'testing':
# ActivityNet testing
characteristic_names_to_bins = {'context-size': (range(-1,7), ['0','1','2','3','4','5','6']),
'context-distance': (range(-1,4), ['Inf','N','M','F']),
'agreement': (np.linspace(0,1.0,6), ['XW','W','M','H','XH']),
'coverage': (np.linspace(0,1.0,6), ['XS','S','M','L','XL']),
'length': (np.array([0,30,60,120,180,np.inf]), ['XS','S','M','L','XL']),
'num-instances': (np.array([-1,1,4,8,np.inf]), ['XS','S','M','L'])}
colors = ['#7fc97f','#beaed4','#fdc086','#386cb0','#f0027f','#bf5b17']
characteristic_names = ['context-size', 'context-distance', 'agreement','coverage', 'length', 'num-instances']
characteristic_names_in_text = ['Context Size', 'Context Distance', 'Agreement', 'Coverage', 'Length', '\# Instances']
characteristic_names_delta_positions = [1.1,-1.4,0.25,0.5,1,-0.2]
figsize = (25,6)
num_grids = 4
elif subset == 'validation':
# ActivityNet validation
characteristic_names_to_bins = {'coverage': (np.linspace(0,1.0,6), ['XS','S','M','L','XL']),
'length': (np.array([0,30,60,120,180,np.inf]), ['XS','S','M','L','XL']),
'num-instances': (np.array([-1,1,4,8,np.inf]), ['XS','S','M','L'])}
colors = ['#386cb0','#f0027f','#bf5b17']
characteristic_names = ['coverage', 'length', 'num-instances']
characteristic_names_in_text = ['Coverage', 'Length', '\# Instances']
characteristic_names_delta_positions = [0.5,1,-0.2]
figsize = (17.5,6)
num_grids = 3
else:
raise RuntimeError('%s is not a valid subset' % subset)
tiou_thresholds = np.linspace(0.5, 0.95, 10)
else:
# THUMOS14
characteristic_names_to_bins = {'coverage': (np.array([0,0.02,0.04,0.06,0.08,1]), ['XS','S','M','L','XL']),
'length': (np.array([0,3,6,12,18,np.inf]), ['XS','S','M','L','XL']),
'num-instances': (np.array([-1,1,40,80,np.inf]), ['XS','S','M','L'])}
colors = ['#386cb0','#f0027f','#bf5b17']
characteristic_names = ['coverage', 'length', 'num-instances']
characteristic_names_in_text = ['Coverage', 'Length', '\# Instances']
characteristic_names_delta_positions = [0.5,1,-0.2]
figsize = (17.5,6)
num_grids = 3
tiou_thresholds = [0.5]
sensitivity_analysis = ActionDetectorDiagnosis(ground_truth_filename=ground_truth_filename,
prediction_filename=prediction_filename,
tiou_thresholds=tiou_thresholds,
limit_factor=None,
min_tiou_thr=0.1,
subset=subset,
verbose=True,
check_status=True,
load_extra_annotations=True,
characteristic_names_to_bins=characteristic_names_to_bins,
normalize_ap=True,
minimum_normalized_precision_threshold_for_detection=0.0
)
sensitivity_analysis.evaluate()
plot_sensitivity_analysis(sensitivity_analysis=sensitivity_analysis,
save_filename=os.path.join(output_folder, 'sensitivity_analysis.pdf'),
colors=colors,
characteristic_names=characteristic_names,
characteristic_names_in_text=characteristic_names_in_text,
characteristic_names_delta_positions=characteristic_names_delta_positions,
figsize=figsize,
num_grids=num_grids)
if __name__ == '__main__':
parser = ArgumentParser(description='Run the sensitivity analysis.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--ground_truth_filename', required=True, type=str,
help='The path to the JSON file containing the ground truth annotations')
parser.add_argument('--subset', default='validation', type=str,
help='The dataset subset to use for the analysis')
parser.add_argument('--prediction_filename', required=True, type=str,
help='The path to the JSON file containing the method\'s predictions')
parser.add_argument('--output_folder', required=True, type=str,
help='The path to the folder in which the results will be saved')
parser.add_argument('--is_thumos14', default=False, action='store_true',
help='Pass this argument if the dataset used is THUMOS14 and not ActivityNet')
args = parser.parse_args()
main(args.ground_truth_filename, args.subset, args.prediction_filename, args.output_folder, args.is_thumos14)
|
135910
|
import shlex
from unittest import TestCase, mock
from compose_flow.commands import Workflow
from tests import BaseTestCase
TEST_PROJECT_NAME = "test_project_name"
@mock.patch("compose_flow.commands.subcommands.env.utils")
@mock.patch("compose_flow.commands.subcommands.env.get_backend")
class WorkflowTestCase(BaseTestCase):
@staticmethod
def _setup_docker_config_mock(*mocks):
get_backend_mock = mocks[-2]
get_backend_mock.return_value.read.return_value = "FOO=1\nBAR=2"
@staticmethod
def _setup_utils_mock(*mocks):
utils_mock = mocks[-1]
utils_mock.get_tag_version.return_value = "0.0.0"
utils_mock.render.side_effect = lambda x, **kwargs: x
def test_config_name_with_different_env(self, *mocks):
"""Ensure the config can have a different env prefix than the current env
This allows storage of environments other than the current one, for example, storing a default local config
in the dev remote so that multiple devs can access it to seed their environment
"""
command = shlex.split("-e dev -c local-test-project env cat")
workflow = Workflow(argv=command)
self.assertEqual("local-test-project", workflow.config_name)
def test_default_env_when_no_env_specified(self, *mocks):
self._setup_docker_config_mock(*mocks)
self._setup_utils_mock(*mocks)
command = shlex.split("env cat")
workflow = Workflow(argv=command)
env = workflow.environment
self.assertEqual([], sorted(env.data.keys()))
@mock.patch("compose_flow.commands.workflow.os")
def test_docker_image_prefix_default(self, *mocks):
"""
Ensure the default image name will not accidentally push the image to a remote registry
"""
os_mock = mocks[0]
os_mock.environ = {}
os_mock.path.exists.return_value = False
workflow = Workflow(argv=[])
self.assertEqual(workflow.docker_image_prefix, "localhost.localdomain")
@mock.patch("compose_flow.commands.workflow.settings")
@mock.patch("compose_flow.commands.workflow.os")
def test_docker_image_prefix_from_os_settings(self, *mocks):
os_mock = mocks[0]
os_mock.path.exists.return_value = False
settings_mock = mocks[1]
settings_mock.DOCKER_IMAGE_PREFIX = "foo"
workflow = Workflow(argv=[])
self.assertEqual(workflow.docker_image_prefix, "foo")
@mock.patch(
"compose_flow.commands.workflow.Workflow.app_config",
new_callable=mock.PropertyMock,
)
def test_docker_image_prefix_from_app_config(self, *mocks):
image_prefix = "registry.prefix.com/foo"
app_config_mock = mocks[0]
app_config_mock.return_value = {"build": {"image_prefix": image_prefix}}
workflow = Workflow(argv=[])
self.assertEqual(workflow.docker_image_prefix, image_prefix)
def test_load_env_when_env_specified(self, *mocks):
self._setup_docker_config_mock(*mocks)
self._setup_utils_mock(*mocks)
command = shlex.split("-e dev env cat")
workflow = Workflow(argv=command)
env = workflow.environment
self.assertEqual(["BAR", "FOO"], sorted(env.data.keys()))
self.assertEqual("1", env.data["FOO"])
self.assertEqual("2", env.data["BAR"])
@mock.patch(
"compose_flow.commands.workflow.Workflow.subcommand",
new_callable=mock.PropertyMock,
)
def test_setup_environment_flag(self, *mocks):
"""
Ensures the environment cache is set to an empty dictionary when
a workflow environment should not be setup
"""
subcommand_mock = mocks[0]
subcommand_mock.return_value.setup_environment = False
command = shlex.split("-e dev env cat")
workflow = Workflow(argv=command)
workflow._setup_environment()
self.assertEqual({}, workflow.environment._data)
@mock.patch("compose_flow.commands.workflow.print")
@mock.patch("compose_flow.commands.workflow.pkg_resources")
def test_version(self, *mocks):
"""
Ensure the --version arg just returns the version
"""
version = "0.0.0-test"
pkg_resources_mock = mocks[0]
pkg_resources_mock.require.return_value = [mock.Mock(version=version)]
command = shlex.split("--version")
workflow = Workflow(argv=command)
workflow.run()
print_mock = mocks[1]
print_mock.assert_called_with(version)
# noinspection PyUnusedLocal
@mock.patch("compose_flow.commands.workflow.PROJECT_NAME", new=TEST_PROJECT_NAME)
class WorkflowArgsTestCase(TestCase):
"""
Tests for parsing command line arguments
"""
def test_sensible_defaults_no_env(self, *mocks):
"""
Test sensible defaults when no environment is defined
"""
command = shlex.split("publish")
workflow = Workflow(argv=command)
self.assertEqual(None, workflow.args.environment)
self.assertEqual(None, workflow.args.remote)
self.assertEqual(TEST_PROJECT_NAME, workflow.config_name)
self.assertEqual(TEST_PROJECT_NAME, workflow.project_name)
def test_sensible_defaults_with_env(self, *mocks):
"""
Test sensible defaults when an environment is defined
"""
env = "dev"
command = shlex.split(f"-e {env} publish")
workflow = Workflow(argv=command)
self.assertEqual(env, workflow.args.environment)
self.assertEqual(env, workflow.args.remote)
self.assertEqual(f"{env}-{TEST_PROJECT_NAME}", workflow.config_name)
self.assertEqual(TEST_PROJECT_NAME, workflow.project_name)
def test_sensible_defaults_with_env_and_project(self, *mocks):
"""
Test sensible defaults when an environment and project name is defined
"""
env = "dev"
command = shlex.split(f"-e {env} --project-name foo publish")
workflow = Workflow(argv=command)
self.assertEqual(env, workflow.args.environment)
self.assertEqual(env, workflow.args.remote)
self.assertEqual(f"{env}-foo", workflow.config_name)
self.assertEqual("foo", workflow.project_name)
|
135925
|
from geocode.geocode import Geocode
from tqdm import tqdm
import pandas as pd
import numpy as np
import ast
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
import os
import logging
import time
logging.basicConfig(level=logging.INFO, format=',%(asctime)s [%(levelname)-5.5s] [%(name)-12.12s]: %(message)s')
logger = logging.getLogger(__name__)
benchmark_path = 'benchmark'
f_out_results = os.path.join(benchmark_path, 'benchmark_results.csv')
f_in = os.path.join(benchmark_path, 'benchmark_data.csv')
# Change this to False to re-run geopy!
geopy_use_cache = True
def read_benchmark_data():
df = pd.read_csv(f_in)
df['true'] = df.true.apply(lambda s: [np.nan] if s == '[nan]' else ast.literal_eval(s))
return df
def predict_local_geocode(df):
gc = Geocode()
gc.prepare(recompute=False)
gc.init()
df['local_geocode_predicted'] = np.nan
df['local_geocode_is_correct'] = False
for i, row in tqdm(df.iterrows(), total=len(df)):
res = gc.decode(row['user.location'])
pred = np.nan
if len(res) > 0:
pred = res[0]['country_code']
df.loc[i, 'local_geocode_predicted'] = pred
df.loc[i, 'local_geocode_is_correct'] = pred in row['true']
acc = df.local_geocode_is_correct.sum()/len(df)
logger.info(f'Local-geocode accuracy: {acc:.3f}')
return df
def predict_geopy(df):
if os.path.isfile(f_out_results) or geopy_use_cache:
df_res = pd.read_csv(f_out_results)
df['predicted_geopy'] = df_res['predicted_geopy']
df['is_correct_geopy'] = df_res['is_correct_geopy']
else:
geolocator = Nominatim(user_agent="<PASSWORD>")
geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
for i, row in tqdm(df.iterrows(), total=len(df)):
res = geocode(row['user.location'], addressdetails=True, exactly_one=True)
if res is None:
res = np.nan
else:
res = res.raw['address']['country_code'].upper()
df.loc[i, 'predicted_geopy'] = res
df.loc[i, 'is_correct_geopy'] = res in row['true']
acc = df.is_correct_geopy.sum()/len(df)
logger.info(f'Nomatim/geopy accuracy: {acc:.3f}')
return df
if __name__ == "__main__":
df = read_benchmark_data()
ts = time.time()
df = predict_geopy(df)
te = time.time()
if not geopy_use_cache:
logger.info(f'Total time geopy: {(te-ts)/60:.1f} min')
ts = time.time()
df = predict_local_geocode(df)
te = time.time()
logger.info(f'Total time local-geocode: {(te-ts):.1f} s')
df.to_csv(f_out_results, index=False)
|
135991
|
import pytest
from flask import Flask, render_template_string
from flask_mobility import Mobility
from flask_mobility.decorators import mobile_template, mobilized
class TestDecorators(object):
@pytest.fixture()
def app(self):
app = Flask(__name__)
Mobility(app)
@app.route("/")
@mobile_template("{mobile/}template.html")
def index(template):
return render_template_string(template)
# Default View
def mobilize():
return render_template_string("False")
# Mobilized view
@app.route("/mobilize")
@mobilized(mobilize)
def mobilize():
return render_template_string("True")
return app
def test_mobile_template_user_agent(self, app):
"""Test the mobile_template decorator"""
client = app.test_client()
# Check without mobile User-Agent header
assert b"template.html" == client.get("/").data
# Check with mobile User-Agent header
headers = [("User-Agent", "android")]
response = client.get("/", headers=headers)
assert b"mobile/template.html" == response.data
def test_mobile_template_cookie(self, app):
client = app.test_client()
assert b"template.html" == client.get("/").data
MOBILE_COOKIE = app.config.get("MOBILE_COOKIE")
client.set_cookie("localhost", MOBILE_COOKIE, "on")
assert b"mobile/template.html" == client.get("/").data
client.set_cookie("localhost", MOBILE_COOKIE, "off")
assert b"template.html" == client.get("/").data
def test_mobilized_user_agent(self, app):
"""Test the mobilized decorator"""
client = app.test_client()
# Check without mobile User-Agent header
assert b"False" == client.get("/mobilize").data
# Check with mobile User-Agent header
headers = [("User-Agent", "android")]
assert b"True" == client.get("/mobilize", headers=headers).data
def test_mobilized_cookie(self, app):
client = app.test_client()
assert b"False" == client.get("/mobilize").data
MOBILE_COOKIE = app.config.get("MOBILE_COOKIE")
client.set_cookie("localhost", MOBILE_COOKIE, "on")
assert b"True" == client.get("/mobilize").data
client.set_cookie("localhost", MOBILE_COOKIE, "off")
assert b"False" == client.get("/mobilize").data
|
136073
|
from datetime import date
from random import choice, sample, randint, uniform
from pepper.brain import RdfBuilder
from pepper.framework import UtteranceHypothesis, Context, Face
from pepper.framework.sensor.obj import Object
from pepper.language import Chat, Utterance, UtteranceType
places = ['Office']
friends = ['Piek', 'Lenka', 'Bram', 'Suzana', 'Selene', 'Lea', 'Thomas', 'Jaap', 'Tae']
binary_values = [True, False]
capsule_knows = {
"utterance": "dimitris knows piek",
"subject": {
"label": "dimitris",
"type": "person"
},
"predicate": {
"type": "knows"
},
"object": {
"label": "piek",
"type": "person"
},
"author": "tom",
"turn": 1,
"position": "0-25",
"date": date(2019, 1, 24)
}
capsule_is_from = {
"utterance": "bram is from mongolia",
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "be-from"
},
"object": {
"label": "netherlands",
"type": "location"
},
"author": "bram",
"chat": 1,
"turn": 1,
"position": "0-25",
"date": date(2018, 3, 19)
}
capsule_is_from_2 = {
"utterance": "bram is from mongolia",
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "be-from"
},
"object": {
"label": "mongolia",
"type": "location"
},
"author": "lenka",
"chat": 1,
"turn": 1,
"position": "0-25",
"date": date(2018, 3, 25)
}
capsule_is_from_3 = {
"utterance": "bram is from mongolia",
"subject": {
"label": "piek",
"type": "person"
},
"predicate": {
"type": "be-from"
},
"object": {
"label": "netherlands",
"type": "location"
},
"author": "bram",
"chat": 1,
"turn": 1,
"position": "0-25",
"date": date(2018, 3, 25)
}
def fake_objects(context):
# Office
if choice(binary_values) and context.location.label == 'Office':
objects = [Object('person', 0.79, None, None), Object('laptop', 0.88, None, None),
Object('chair', 0.88, None, None), Object('laptop', 0.51, None, None),
Object('bottle', 0.88, None, None)]
elif choice(binary_values) and context.location.label == 'Office':
objects = [Object('person', 0.79, None, None), Object('plant', 0.88, None, None),
Object('chair', 0.88, None, None), Object('laptop', 0.51, None, None)]
elif choice(binary_values) and context.location.label == 'Office':
objects = [Object('person', 0.79, None, None), Object('plant', 0.88, None, None),
Object('chair', 0.88, None, None), Object('laptop', 0.51, None, None),
Object('book', 0.88, None, None), Object('laptop', 0.51, None, None)]
# Market
elif choice(binary_values) and context.location.label == 'Market':
objects = [Object('apple', 0.79, None, None), Object('banana', 0.88, None, None),
Object('avocado', 0.51, None, None), Object('banana', 0.88, None, None)]
elif choice(binary_values) and context.location.label == 'Market':
objects = [Object('apple', 0.79, None, None), Object('banana', 0.88, None, None),
Object('avocado', 0.51, None, None), Object('strawberry', 0.88, None, None)]
# Playground
elif choice(binary_values) and context.location.label == 'Playground':
objects = [Object('person', 0.79, None, None), Object('teddy bear', 0.88, None, None),
Object('teddy bear', 0.88, None, None), Object('cat', 0.51, None, None)]
elif choice(binary_values) and context.location.label == 'Playground':
objects = [Object('apple', 0.79, None, None), Object('banana', 0.88, None, None),
Object('cat', 0.51, None, None), Object('banana', 0.88, None, None)]
# Other
else:
if context.location.label != 'Market':
objects = [Object('teddy bear', 0.79, None, None), Object('dog', 0.88, None, None),
Object('cat', 0.51, None, None), Object('dog', 0.88, None, None)]
else:
objects = [Object('apple', 0.79, None, None), Object('banana', 0.88, None, None),
Object('avocado', 0.51, None, None), Object('strawberry', 0.88, None, None)]
return objects
def fake_people():
num_people = randint(0, len(friends))
people = sample(friends, num_people)
faces = set()
for peep in people:
confidence = uniform(0, 1)
f = Face(peep, confidence, None, None, None)
faces.add(f)
# Add strangers?
if choice(binary_values):
confidence = uniform(0, 1)
faces.add(Face('Stranger', confidence, None, None, None))
return faces
def fake_context(empty=False, no_people=False, place=False):
context = Context()
# Set place
if place:
context.location._label = choice(places)
faces = fake_people()
objects = fake_objects(context)
    # Set objects
    if not empty:
        context.add_objects(objects)
    # Set people
    if not no_people:
        context.add_people(faces)
return context
def fake_chat(capsule, context):
chat = Chat(capsule['author'], context)
chat.set_id(capsule['chat'])
return chat
def fake_utterance(capsule, chat):
hyp = UtteranceHypothesis(capsule['utterance'], 0.99)
utt = Utterance(chat, [hyp], False, capsule['turn'])
utt._type = UtteranceType.STATEMENT
utt.set_turn(capsule['turn'])
return utt
def fake_triple(capsule, utt):
builder = RdfBuilder()
triple = builder.fill_triple(capsule['subject'], capsule['predicate'], capsule['object'])
utt.set_triple(triple)
utt.pack_perspective(capsule['perspective'])
def transform_capsule(capsule, empty=False, no_people=False, place=False):
context = fake_context(empty=empty, no_people=no_people, place=place)
context.set_datetime(capsule['date'])
chat = fake_chat(capsule, context)
utt = fake_utterance(capsule, chat)
fake_triple(capsule, utt)
return utt
|
136096
|
from sportsdataverse.nba.nba_loaders import *
from sportsdataverse.nba.nba_pbp import *
from sportsdataverse.nba.nba_schedule import *
from sportsdataverse.nba.nba_teams import *
|
136128
|
import utils
import argparse
import pathlib
from collections import namedtuple
import itertools
import math
parser = argparse.ArgumentParser(description="Generate run descriptions")
parser.add_argument("base_dir", type=str,
help="Base directory for run descriptions")
args = parser.parse_args()
base_dir = pathlib.Path(args.base_dir)
EPOCHS = 800
NUM_REPEATS = 3
# Navier-Stokes base parameters
NS_END_TIME = 0.08 * 65
NS_DT = 0.08
NS_STEPS = math.ceil(NS_END_TIME / NS_DT)
NS_SUBSAMPLE = 1
EVAL_INTEGRATORS = ["leapfrog", "euler", "rk4"]
TRAIN_NOISE_VAR = 1e-3
N_OBSTACLES = 1
COARSE_LEVELS = [1] # Used for time skew parameter for training & validation
TRAIN_SET_SIZES = [25, 50, 100]
writable_objects = []
experiment_general = utils.Experiment("ns-runs")
experiment_step = utils.Experiment("ns-runs-step")
experiment_deriv = utils.Experiment("ns-runs-deriv")
train_source = utils.NavierStokesMeshInitialConditionSource(velocity_range=(1.0, 1.0), radius_range=(0.05, 0.1), n_obstacles=N_OBSTACLES)
val_source = utils.NavierStokesMeshInitialConditionSource(velocity_range=(1.0, 1.0), radius_range=(0.05, 0.1), n_obstacles=N_OBSTACLES)
eval_source = utils.NavierStokesMeshInitialConditionSource(velocity_range=(1.0, 1.0), radius_range=(0.05, 0.1), n_obstacles=N_OBSTACLES)
eval_outdist_source = utils.NavierStokesMeshInitialConditionSource(velocity_range=(1.0, 1.0), radius_range=(0.025, 0.05), n_obstacles=N_OBSTACLES)
train_sets = []
val_set = None
eval_sets = {}
# Generate data sets
# Generate train set
for num_traj in TRAIN_SET_SIZES:
_train_set = utils.NavierStokesDataset(experiment=experiment_general,
initial_cond_source=train_source,
set_type="train",
num_traj=num_traj,
subsampling=NS_SUBSAMPLE,
num_time_steps=NS_STEPS,
time_step_size=NS_DT)
train_sets.append(_train_set)
_val_set = utils.NavierStokesDataset(experiment=experiment_general,
initial_cond_source=val_source,
set_type="val",
num_traj=2,
subsampling=NS_SUBSAMPLE,
num_time_steps=NS_STEPS,
time_step_size=NS_DT)
val_set = _val_set
writable_objects.extend(train_sets)
writable_objects.append(val_set)
# Generate eval sets
for source, num_traj, type_key, step_multiplier in [
(eval_source, 5, "eval", 1),
#(eval_source, 3, "eval-long", 3),
(eval_outdist_source, 5, "eval-outdist", 1),
#(eval_outdist_source, 3, "eval-outdist-long", 3),
]:
for coarse in COARSE_LEVELS:
_ns_dt = NS_DT * coarse
_ns_steps = round(NS_END_TIME / _ns_dt)
_ns_subsample = NS_SUBSAMPLE * coarse
_eval_set = utils.NavierStokesDataset(experiment=experiment_general,
initial_cond_source=source,  # source from the loop tuple (in-dist vs. out-of-dist)
set_type=f"{type_key}-cors{coarse}",
num_traj=num_traj,
num_time_steps=_ns_steps,
subsampling=_ns_subsample,
time_step_size=_ns_dt)
_eval_set.name_tag = f"cors{coarse}"
if coarse not in eval_sets:
eval_sets[coarse] = []
eval_sets[coarse].append(_eval_set)
writable_objects.extend(itertools.chain.from_iterable(eval_sets.values()))
# Emit baseline integrator runs for each evaluation set
for integrator in (EVAL_INTEGRATORS + ["back-euler", "bdf-2"]):
for coarse in [1]: #COARSE_LEVELS:
for eval_set in eval_sets[coarse]:
# NO BASELINES YET
pass
# integration_run_double = utils.BaselineIntegrator(experiment=experiment_deriv,
# eval_set=eval_set,
# eval_dtype="double",
# integrator=integrator)
# integration_run_double.name_tag = f"cors{coarse}"
# writable_objects.append(integration_run_double)
# Emit KNN runs
# First, KNN predictors
for coarse, train_set in itertools.product(COARSE_LEVELS, train_sets):
for eval_set in eval_sets[coarse]:
knn_pred = utils.KNNPredictorOneshot(experiment_step,
training_set=train_set,
eval_set=eval_set,
step_time_skew=coarse,
step_subsample=1)
knn_pred.name_tag = f"cors{coarse}"
writable_objects.append(knn_pred)
# Next, KNN regressors
for train_set, integrator in itertools.product(train_sets, EVAL_INTEGRATORS):
for eval_set in eval_sets[1]:
pass
knn_reg = utils.KNNRegressorOneshot(experiment_deriv,
training_set=train_set,
eval_set=eval_set,
integrator=integrator)
writable_objects.append(knn_reg)
# DERIVATIVE: Emit MLP runs
for train_set, _repeat in itertools.product(train_sets, range(NUM_REPEATS)):
# Other networks work for all integrators
general_int_nets = []
# MLPs
for width, depth in [(4096, 4), (2048, 5)]:
mlp_deriv_train = utils.MLP(experiment=experiment_deriv,
training_set=train_set,
batch_size=375,
hidden_dim=width, depth=depth,
learning_rate=(1e-4),
predict_type="deriv",
noise_variance=TRAIN_NOISE_VAR,
validation_set=val_set, epochs=EPOCHS)
general_int_nets.append(mlp_deriv_train)
# CNNs
for cnn_arch in [
[(None, 32, 9), (32, 32, 9), (32, 32, 9), (32, None, 9)],
[(None, 64, 9), (64, 64, 9), (64, 64, 9), (64, None, 9)],
]:
cnn_deriv_train = utils.CNN(experiment=experiment_deriv,
training_set=train_set,
batch_size=375,
chans_inout_kenel=cnn_arch,
learning_rate=(1e-4),
predict_type="deriv",
padding_mode="replicate",
noise_variance=TRAIN_NOISE_VAR,
validation_set=val_set, epochs=EPOCHS)
general_int_nets.append(cnn_deriv_train)
# U-Net
unet_deriv_train = utils.UNet(
experiment=experiment_deriv,
training_set=train_set,
learning_rate=0.0004,
train_dtype="float",
batch_size=375,
epochs=EPOCHS,
validation_set=val_set,
predict_type="deriv",
noise_variance=TRAIN_NOISE_VAR,
)
general_int_nets.append(unet_deriv_train)
# Eval runs
writable_objects.extend(general_int_nets)
for trained_net, eval_set, integrator in itertools.product(general_int_nets, eval_sets[1], EVAL_INTEGRATORS):
eval_run = utils.NetworkEvaluation(experiment=experiment_deriv,
network=trained_net,
eval_set=eval_set,
integrator=integrator)
eval_run.name_tag = trained_net.name_tag
writable_objects.append(eval_run)
# STEP: Emit MLP runs
for coarse, train_set, _repeat in itertools.product(COARSE_LEVELS, train_sets, range(NUM_REPEATS)):
general_int_nets = []
for width, depth in [(4096, 4), (2048, 5)]:
mlp_step_train = utils.MLP(experiment=experiment_step,
training_set=train_set,
batch_size=375,
hidden_dim=width, depth=depth,
learning_rate=(1e-4),
predict_type="step",
step_time_skew=coarse, step_subsample=1,
noise_variance=TRAIN_NOISE_VAR,
validation_set=val_set, epochs=EPOCHS)
mlp_step_train.name_tag = f"cors{coarse}"
general_int_nets.append(mlp_step_train)
# CNNs
for cnn_arch in [
[(None, 32, 9), (32, 32, 9), (32, 32, 9), (32, None, 9)],
[(None, 64, 9), (64, 64, 9), (64, 64, 9), (64, None, 9)],
]:
cnn_step_train = utils.CNN(experiment=experiment_step,
training_set=train_set,
batch_size=375,
chans_inout_kenel=cnn_arch,
learning_rate=(1e-4),
predict_type="step",
step_time_skew=coarse, step_subsample=1,
padding_mode="replicate",
noise_variance=TRAIN_NOISE_VAR,
validation_set=val_set, epochs=EPOCHS)
cnn_step_train.name_tag = f"cors{coarse}"
general_int_nets.append(cnn_step_train)
# U-Net
unet_step_train = utils.UNet(
experiment=experiment_step,
training_set=train_set,
learning_rate=0.0004,
train_dtype="float",
batch_size=375,
epochs=EPOCHS,
validation_set=val_set,
predict_type="step",
step_time_skew=coarse,
step_subsample=1,
noise_variance=TRAIN_NOISE_VAR,
)
unet_step_train.name_tag = f"cors{coarse}"
general_int_nets.append(unet_step_train)
writable_objects.extend(general_int_nets)
for trained_net, eval_set in itertools.product(general_int_nets, eval_sets[coarse]):
eval_run = utils.NetworkEvaluation(experiment=experiment_step,
network=trained_net,
eval_set=eval_set,
integrator="null")
eval_run.name_tag = trained_net.name_tag
writable_objects.append(eval_run)
if __name__ == "__main__":
for obj in writable_objects:
obj.write_description(base_dir)
|
136149
|
SHORT_DESCRIPTION = "Sorter organises/sorts files using a customised search function to group those that have similar characteristics into a single folder. Similar characteristics include file type, file name or part of the name and file category. You can put all letters documents into one folder, all images with the word home into another, all music by one artist in yet another folder, etc."
SOURCE_DESCRIPTION = "SOURCE (required)\nThis is the folder in which the sorting should be done i.e the folder containing the disorganised files."
DESTINATION_DESCRIPTION = "DESTINATION (optional)\nAn optional destination (a folder) where the user would want the sorted files/folders to be moved to."
RECURSIVE_DESCRIPTION = "LOOK INTO SUB-FOLDERS (optional)\nChecks into every child folder, starting from the source folder, and groups/sorts the files accordingly."
TYPES_DESCRIPTION = "SELECT FILE TYPES (optional)\nSelect the specific file types/formats to be sorted."
SEARCH_DESCRIPTION = "SEARCH FOR (optional)\nDirects Sorter to search and only group files with names containing this value. If this is enabled then, by default, Sort Folders option is enabled to enable the sorted files to be moved to a folder whose name will be the value provided here. The search is case-insensitive but the final folder will adopt the case styles."
GROUP_FOLDER_DESCRIPTION = "GROUP INTO FOLDER (optional)\nMoves all files (and folders) fitting the search descriptions into a folder named by the value provided in this option."
BY_EXTENSION_DESCRIPTION = "GROUP BY FILE TYPE (optional)\nGroups files in the destination and according to their file type. That is, all JPGs different from PDFs different from DOCXs."
CLEANUP_DESCRIPTION = "PERFORM CLEANUP (optional)\nLooks into the child folders of the source folder and removes those which are empty."
NOTE = "Note:\nIf you want a folder and its contents to be left as is (i.e. not to be sorted or affected in any way), just add a file named `.signore` (no extension) into the folder."
HELP_MESSAGE = "How it Works \n" + SHORT_DESCRIPTION + "\n\nBelow is a description of the fields required to achieve results using Sorter:\n\n" + SOURCE_DESCRIPTION + "\n\n" + DESTINATION_DESCRIPTION + \
"\n\n" + SEARCH_DESCRIPTION + "\n\n" + RECURSIVE_DESCRIPTION + \
"\n\n" + TYPES_DESCRIPTION + "\n\n" + \
GROUP_FOLDER_DESCRIPTION + "\n\n" + BY_EXTENSION_DESCRIPTION + "\n\n" + CLEANUP_DESCRIPTION + \
"\n\n" + NOTE
COPYRIGHT_MESSAGE = "Copyright \u00a9 2017\n\n<NAME>\nAll rights reserved.\n\n"
HOMEPAGE = "https://giantas.github.io/sorter"
SOURCE_CODE = "https://github.com/giantas/sorter"
LICENSE = """BSD 3-Clause License
Copyright (c) 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
|
136161
|
def includeme(config):
""" Set up event subscribers. """
from .models import (
AuthUserMixin,
random_uuid,
lower_strip,
encrypt_password,
)
add_proc = config.add_field_processors
add_proc(
[random_uuid, lower_strip],
model=AuthUserMixin, field='username')
add_proc([lower_strip], model=AuthUserMixin, field='email')
add_proc([encrypt_password], model=AuthUserMixin, field='password')
|
136186
|
import unittest
import json
from lax_response_adapter import LaxResponseAdapter
from mock import Mock
from provider.utils import base64_encode_string
FAKE_TOKEN = json.dumps(
{
u"status": u"vor",
u"expanded_folder": u"837411455.1/a8bb05df-2df9-4fce-8f9f-219aca0b0148",
u"version": u"1",
u"force": False,
u"run": u"a8bb05df-2df9-4fce-8f9f-219aca0b0148",
}
)
FAKE_LAX_MESSAGE = json.dumps(
{
"status": "published",
"requested-action": "publish",
"datetime": "2013-03-26T00:00:00+00:00",
"token": base64_encode_string(FAKE_TOKEN),
"id": "837411455",
}
)
WORKFLOW_MESSAGE_EXPECTED = {
"workflow_data": {
"article_id": u"837411455",
"expanded_folder": u"837411455.1/a8bb05df-2df9-4fce-8f9f-219aca0b0148",
"message": None,
"requested_action": u"publish",
"force": False,
"result": u"published",
"run": u"a8bb05df-2df9-4fce-8f9f-219aca0b0148",
"status": u"vor",
"update_date": "2013-03-26T00:00:00Z",
"version": u"1",
"run_type": None,
},
"workflow_name": "PostPerfectPublication",
}
FAKE_TOKEN_269 = json.dumps(
{
u"status": u"vor",
u"expanded_folder": u"00269.1/a8bb05df-2df9-4fce-8f9f-219aca0b0148",
u"version": u"1",
u"force": False,
u"run": u"a8bb05df-2df9-4fce-8f9f-219aca0b0148",
}
)
FAKE_LAX_MESSAGE_269 = json.dumps(
{
"status": "published",
"requested-action": "publish",
"datetime": "2013-03-26T00:00:00+00:00",
"token": base64_encode_string(FAKE_TOKEN_269),
"id": "269",
}
)
WORKFLOW_MESSAGE_EXPECTED_269 = {
"workflow_data": {
"article_id": u"269",
"expanded_folder": u"00269.1/a8bb05df-2df9-4fce-8f9f-219aca0b0148",
"message": None,
"requested_action": u"publish",
"force": False,
"result": u"published",
"run": u"a8bb05df-2df9-4fce-8f9f-219aca0b0148",
"status": u"vor",
"update_date": "2013-03-26T00:00:00Z",
"version": u"1",
"run_type": None,
},
"workflow_name": "PostPerfectPublication",
}
FAKE_SILENT_INGEST_TOKEN = json.dumps(
{
u"status": u"vor",
u"run_type": "silent-correction",
u"expanded_folder": u"837411455.1/a8bb05df-2df9-4fce-8f9f-219aca0b0148",
u"version": u"1",
u"force": True,
u"run": u"a8bb05df-2df9-4fce-8f9f-219aca0b0148",
}
)
FAKE_SILENT_INGEST_LAX_MESSAGE = json.dumps(
{
"datetime": "2013-03-26T00:00:00+00:00",
"force": True,
"status": "ingested",
"id": "837411455",
"token": base64_encode_string(FAKE_SILENT_INGEST_TOKEN),
"validate-only": False,
"requested-action": "ingest",
}
)
WORKFLOW_MESSAGE_EXPECTED_SILENT_INGEST = {
"workflow_data": {
"article_id": u"837411455",
"expanded_folder": u"837411455.1/a8bb05df-2df9-4fce-8f9f-219aca0b0148",
"message": None,
"requested_action": u"ingest",
"force": True,
"result": u"ingested",
"run": u"a8bb05df-2df9-4fce-8f9f-219aca0b0148",
"status": u"vor",
"update_date": "2013-03-26T00:00:00Z",
"version": u"1",
"run_type": "silent-correction",
},
"workflow_name": "SilentCorrectionsProcess",
}
class TestLaxResponseAdapter(unittest.TestCase):
def setUp(self):
settings = Mock()
self.logger = Mock()
self.laxresponseadapter = LaxResponseAdapter(settings, self.logger)
def test_parse_message(self):
expected_workflow_starter_message = self.laxresponseadapter.parse_message(
FAKE_LAX_MESSAGE
)
self.assertDictEqual(
expected_workflow_starter_message, WORKFLOW_MESSAGE_EXPECTED
)
def test_parse_message_269(self):
expected_workflow_starter_message = self.laxresponseadapter.parse_message(
FAKE_LAX_MESSAGE_269
)
self.assertDictEqual(
expected_workflow_starter_message, WORKFLOW_MESSAGE_EXPECTED_269
)
def test_parse_message_silent_ingest(self):
expected_workflow_starter_message = self.laxresponseadapter.parse_message(
FAKE_SILENT_INGEST_LAX_MESSAGE
)
self.assertDictEqual(
expected_workflow_starter_message, WORKFLOW_MESSAGE_EXPECTED_SILENT_INGEST
)
if __name__ == "__main__":
unittest.main()
|
136199
|
import librosa
import torch
import torchaudio
from torchaudio.transforms import Resample, Spectrogram
def load(path, sample_rate=22050):
waveform, source_rate = torchaudio.load(path)
if len(waveform) > 1:
waveform = waveform.mean(dim=0)
if source_rate != sample_rate:
resample = Resample(source_rate, sample_rate)
waveform = resample(waveform)
return waveform
class SignalProcessor:
def __init__(
self,
sample_rate=22050,
n_fft=1024,
win_length=1024,
hop_length=256,
f_min=0,
f_max=8000,
n_mels=80,
a_min=1e-5,
):
self.spect_fn = Spectrogram(
n_fft=n_fft, win_length=win_length, hop_length=hop_length, power=1,
)
self.mel_basis = torch.from_numpy(
librosa.filters.mel(sample_rate, n_fft, n_mels, f_min, f_max)
).float()
self.a_min = a_min
def spectrogram(self, wav):
return self.spect_fn(wav)
def mel_spectrogram(self, wav):
lin = self.spect_fn(wav)
return torch.matmul(self.mel_basis, lin)
def log_mel_spectrogram(self, wav):
mel = self.mel_spectrogram(wav)
return torch.log(torch.clamp(mel, min=self.a_min))
|
136252
|
from sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities import CameraRobotEnv, BaseRobotEnv
from sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_bases import *
from sandbox.crazyflie.src.gcg.envs.GibsonEnv.robot_locomotors import Quadrotor3
from transforms3d import quaternions
import os
import numpy as np
import sys
import pybullet as p
from gibson.core.physics.scene_stadium import SinglePlayerStadiumScene
import pybullet_data
import cv2
import random
from gcg.envs.env_spec import EnvSpec
from collections import OrderedDict
from gcg.envs.spaces.box import Box
from gcg.envs.spaces.discrete import Discrete
from termcolor import colored
CALC_OBSTACLE_PENALTY = 1
tracking_camera = {
'yaw': 20,
'z_offset': 0.5,
'distance': 1,
'pitch': -20
}
tracking_camera_top = {
'yaw': 20, # demo: living room, stairs
'z_offset': 0.5,
'distance': 1,
'pitch': -20
}
class DroneNavigateEnv(CameraRobotEnv):
"""Specfy navigation reward
"""
def __init__(self, config, gpu_count=0):
#self.config = self.parse_config(config)
self.config = config
CameraRobotEnv.__init__(self, self.config, gpu_count,
scene_type="building",
tracking_camera=tracking_camera)
self.robot_introduce(Quadrotor3(self.config, env=self))
self.scene_introduce()
self.gui = self.config["mode"] == "gui"
self.total_reward = 0
self.total_frame = 0
def add_text(self, img):
font = cv2.FONT_HERSHEY_SIMPLEX
x,y,z = self.robot.body_xyz
r,p,ya = self.robot.body_rpy
cv2.putText(img, 'x:{0:.4f} y:{1:.4f} z:{2:.4f}'.format(x,y,z), (10, 20), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img, 'ro:{0:.4f} pth:{1:.4f} ya:{2:.4f}'.format(r,p,ya), (10, 40), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img, 'potential:{0:.4f}'.format(self.potential), (10, 60), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img, 'fps:{0:.4f}'.format(self.fps), (10, 80), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
return img
def _rewards(self, action=None, debugmode=False):
a = action
potential_old = self.potential
self.potential = self.robot.calc_potential()
progress = float(self.potential - potential_old)
feet_collision_cost = 0.0
for i, f in enumerate(
self.robot.feet): # TODO: Maybe calculating feet contacts could be done within the robot code
# print(f.contact_list())
contact_ids = set((x[2], x[4]) for x in f.contact_list())
# print("CONTACT OF '%d' WITH %d" % (contact_ids, ",".join(contact_names)) )
if (self.ground_ids & contact_ids):
# see Issue 63: https://github.com/openai/roboschool/issues/63
# feet_collision_cost += self.foot_collision_cost
self.robot.feet_contact[i] = 1.0
else:
self.robot.feet_contact[i] = 0.0
# print(self.robot.feet_contact)
#electricity_cost = self.electricity_cost * float(np.abs(a*self.robot.joint_speeds).mean()) # let's assume we
electricity_cost = self.stall_torque_cost * float(np.square(a).mean())
debugmode = 0
wall_contact = [pt for pt in self.robot.parts['base_link'].contact_list() if pt[6][2] > 0.15]
wall_collision_cost = self.wall_collision_cost * len(wall_contact)
joints_at_limit_cost = float(self.joints_at_limit_cost * self.robot.joints_at_limit)
close_to_goal = 0
if self.robot.dist_to_target() < 2:
close_to_goal = 0.5
obstacle_penalty = 0
debugmode = 0
debugmode = 0
if (debugmode):
print("Wall contact points", len(wall_contact))
print("Collision cost", wall_collision_cost)
print("electricity_cost", electricity_cost)
print("close to goal", close_to_goal)
#print("progress")
#print(progress)
#print("electricity_cost")
#print(electricity_cost)
#print("joints_at_limit_cost")
#print(joints_at_limit_cost)
#print("feet_collision_cost")
#print(feet_collision_cost)
rewards = [
#alive,
progress,
#wall_collision_cost,
close_to_goal,
obstacle_penalty
#electricity_cost,
#joints_at_limit_cost,
#feet_collision_cost
]
return rewards
def _termination(self, debugmode=False):
done = self.nframe > 250 or self.robot.get_position()[2] < 0
return done
def _reset(self):
self.total_frame = 0
self.total_reward = 0
obs = CameraRobotEnv._reset(self)
return obs
class GcgDroneNavigateEnv(DroneNavigateEnv):
def __init__(self, params={}, gpu_count=0):
DroneNavigateEnv.__init__(self, params, gpu_count)
self._obs_shape = params['obs_shape']
self._yaw_limits = params['yaw_limits']
self._horizon = params['horizon']
self._model_id = params['model_id']
self._setup_spec()
assert (self.observation_im_space.shape[-1] == 1 or self.observation_im_space.shape[-1] == 3)
self.spec = EnvSpec(self.observation_im_space,self.action_space,self.action_selection_space,self.observation_vec_spec,self.action_spec,self.action_selection_spec,self.goal_spec)
@property
def horizon(self):
return self._horizon
def _setup_spec(self):
self.action_spec = OrderedDict()
self.action_selection_spec = OrderedDict()
self.observation_vec_spec = OrderedDict()
self.goal_spec = OrderedDict()
self.action_spec['yaw'] = Box(low=-180, high=180)
self.action_space = Box(low=np.array([self.action_spec['yaw'].low[0]]),
high=np.array([self.action_spec['yaw'].high[0]]))
self.action_selection_spec['yaw'] = Box(low=self._yaw_limits[0], high=self._yaw_limits[1])
self.action_selection_space = Box(low = np.array([self.action_selection_spec['yaw'].low[0]]), high = np.array([self.action_selection_spec['yaw'].high[0]]))
assert (np.logical_and(self.action_selection_space.low >= self.action_space.low,
self.action_selection_space.high <= self.action_space.high).all())
self.observation_im_space = Box(low=0, high=255, shape=self._obs_shape)
self.observation_vec_spec['coll'] = Discrete(1)
def step(self, a):
observations, reward, _, env_info_internal = DroneNavigateEnv._step(self, a)
done = self.get_collision()
filtered_obs = self.get_filtered_observation(observations)
env_info = dict(x=env_info_internal["x"], y=env_info_internal["y"], yaw=env_info_internal["yaw"], height=env_info_internal["height"], speed=env_info_internal["speed"], model_id=self._model_id)
return filtered_obs, np.array([]), reward, done, env_info
def reset(self, offline=False, keep_rosbag=True):
observations = DroneNavigateEnv._reset(self)
filtered_obs = self.get_filtered_observation_reset(observations)
return filtered_obs, np.array([])
def ros_is_good(self, print=False):
return True
def get_collision(self):
collision = (len(self.robot.parts['base_link'].contact_list()) > 0) or (abs(self.robot.get_orientation_eulerian()[0]) > 0.5) or (abs(self.robot.get_orientation_eulerian()[1]) > 0.5)
if collision:
print("\n")
print(colored("COLLISION!!!!!", "green"))
print("\n")
print(colored("COLLISION!!!!!", "red"))
print("\n")
print(colored("COLLISION!!!!!", "yellow"))
print("\n")
return collision
def get_filtered_observation(self, observations):
image = observations['rgb_filled']
image_resized = cv2.cvtColor(cv2.resize(image, (96, 96))[12:84], cv2.COLOR_BGR2GRAY)
cv2.imshow('image', image_resized)
cv2.waitKey(5)
return (image_resized, np.array([int(self.get_collision())]))
def get_filtered_observation_reset(self, observations):
image = observations['rgb_filled']
image_resized = cv2.cvtColor(cv2.resize(image, (96, 96))[12:84], cv2.COLOR_BGR2GRAY)
cv2.imshow('image', image_resized)
cv2.waitKey(5)
return (image_resized, np.array([0]))
|
136256
|
import binascii
import struct
import os
import gevent
import ipaddress
import time
from gevent.lock import RLock
from gevent.event import AsyncResult
from gevent import socket
import collections
import traceback
try:
import color_logging
import logging
logger = logging
except:
import logging
logger = logging
logger.debug = logger.warning
ProtocolHandler = collections.namedtuple("ProtocolHandler", "permission, handler")
NonceCallback = collections.namedtuple("NonceCallback", "id, callback")
NextHop = collections.namedtuple("NextHop", "id, tcp_address")
Peer = collections.namedtuple("Peer", "socket, id, address")
CONNECTION_RETRIES = 1
NETWORK_TIMEOUT = 3.0
ZW_ADDRESS_LEN = 1
ZB_ADDRESS_LEN = 2
IP_ADDRESS_LEN = 4
STOP_MODE = (0, "STOP")
ADD_MODE = (1, "ADD")
DEL_MODE = (2, "DELETE")
ONLY_FROM_TCP_SERVER = 1
ONLY_FROM_TRANSPORT_INTERFACE = 2
VALID_FROM_ALL = 3
MASTER_ID = 0
MPTN_UDP_PORT = 5775 # 0x57 for 'W', 0x75 for 'u'
MPTN_TCP_PACKET_SIZE = struct.calcsize("!L")
MPTN_TCP_NONCE_SIZE = struct.calcsize("!8B")
GWIDREQ_PAYLOAD_LEN = 16
IDREQ_PAYLOAD_LEN = 16
ID_TO_STRING = lambda x: str(ipaddress.ip_address(x))
ID_INTERFACE_FROM_TUPLE = lambda ip, mask: ipaddress.ip_interface("%s/%s" % (ip, mask))
ID_INTERFACE_FROM_STRING = lambda x: ipaddress.ip_interface(x)
ID_NETWORK_FROM_TUPLE = lambda ip, mask: ipaddress.ip_interface("%s/%s" % (ip, mask)).network
ID_NETWORK_FROM_STRING = lambda x: ipaddress.ip_interface(x).network
ID_FROM_STRING = lambda x: int(ipaddress.ip_interface(x).ip)
IS_ID_IN_NETWORK = lambda x, network: ipaddress.ip_address(x) in network
VALUE_TO_STRING = lambda value: ":".join(map(lambda x:"%02X"%x, value))
# Acceptable payload format
# 4 bytes: destination DID
# 4 bytes: source DID
# 1 byte: message type
# rest byte(s): payload
MPTN_ID_LEN = 4
MPTN_MAX_ID = 2 ** (MPTN_ID_LEN*8) - 1
MPTN_MSGTYPE_LEN = 1
MPTN_HEADER_FORMAT = (MPTN_ID_LEN, MPTN_ID_LEN, MPTN_MSGTYPE_LEN)
MPTN_DEST_BYTE_OFFSET = 0
MPTN_SRC_BYTE_OFFSET = MPTN_DEST_BYTE_OFFSET + MPTN_ID_LEN
MPTN_MSGTYPE_BYTE_OFFSET = MPTN_SRC_BYTE_OFFSET + MPTN_ID_LEN
MPTN_PAYLOAD_BYTE_OFFSET = MPTN_MSGTYPE_BYTE_OFFSET + MPTN_MSGTYPE_LEN
MPTN_MSGTYPE_GWDISCOVER = 0
MPTN_MSGTYPE_GWOFFER = 1
MPTN_MSGTYPE_IDREQ = 2
MPTN_MSGTYPE_IDACK = 3
MPTN_MSGTYPE_IDNAK = 4
MPTN_MSGTYPE_GWIDREQ = 5
MPTN_MSGTYPE_GWIDACK = 6
MPTN_MSGTYPE_GWIDNAK = 7
MPTN_MSGTYPE_RTPING = 8
MPTN_MSGTYPE_RTREQ = 9
MPTN_MSGTYPE_RTREP = 10
MPTN_MSGTYPE_RPCCMD = 16
MPTN_MSGTYPE_RPCREP = 17
MPTN_MSGTYPE_FWDREQ = 24
MPTN_MSGTYPE_FWDACK = 25
MPTN_MSGTYPE_FWDNAK = 26
WKPF_COMMAND_MONITOR = 0xB5
HEADER_FORMAT_STR = "!" + ''.join([{1:'B',2:'H',4:'I',8:'Q'}[i] for i in MPTN_HEADER_FORMAT])
def split_packet_to_list(message):
ret = []
for i in MPTN_HEADER_FORMAT:
ret.append(message[:i])
message = message[i:]
if len(message) > 0:
ret.append(message)
return ret
def formatted_print(msg):
"""Receives all message parts from socket, printing each frame neatly"""
r = "----------------------------------------\n"
for part in msg:
r += "[%03d]" % len(part) # Trailing comma suppresses newline
try:
r += "%s" % part.decode('ascii')
r += "\t("
r += r"0x%s" % (binascii.hexlify(part).decode('ascii'))
r += ")"
except UnicodeDecodeError:
r += r"0x%s" % (binascii.hexlify(part).decode('ascii'))
r += '\n'
return r
def create_packet_to_str(dest_id, src_id, msg_type, payload):
header = struct.pack(HEADER_FORMAT_STR, dest_id, src_id, msg_type)
if payload is not None: return header + payload
return header
def extract_packet_from_str(s):
if len(s) < MPTN_PAYLOAD_BYTE_OFFSET: return (None,None,None,None)
header, payload = s[:MPTN_PAYLOAD_BYTE_OFFSET], s[MPTN_PAYLOAD_BYTE_OFFSET:]
if payload == "": payload = None
return struct.unpack(HEADER_FORMAT_STR, header)+(payload,)
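# Illustrative round trip through the two helpers above (the IDs and payload are made up
# for the example; the message type is one of the constants defined earlier):
#   pkt = create_packet_to_str(0x0A000001, 0x0A000002, MPTN_MSGTYPE_FWDREQ, "hello")
#   len(pkt) == MPTN_PAYLOAD_BYTE_OFFSET + 5   # 9-byte header followed by the payload
#   extract_packet_from_str(pkt) == (0x0A000001, 0x0A000002, MPTN_MSGTYPE_FWDREQ, "hello")
# A string shorter than MPTN_PAYLOAD_BYTE_OFFSET yields (None, None, None, None).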
class Context(object):
__slots__ = ("id", "address", "direction", "socket", "nonce")
def __init__(self, mptn_id, address, direction, sock, nonce):
self.id = mptn_id
self.address = address
self.direction = direction
self.socket = sock
self.nonce = nonce
def new_if_context(addr):
return Context(None, addr, ONLY_FROM_TRANSPORT_INTERFACE, None, None)
process_message_handler = None
def set_message_handler(handler):
global process_message_handler
process_message_handler = handler
find_nexthop_for_id = None
def set_nexthop_lookup_function(lookup):
global find_nexthop_for_id
find_nexthop_for_id = lookup
self_id_net_endian_string = None
def set_self_id(mptn_id):
global self_id_net_endian_string
try:
self_id_net_endian_string = struct.pack("!L",socket.htonl(mptn_id))
except Exception as e:
logger.error("set_self_id unknown error: %s\n%s" % (str(e), traceback.format_exc()))
class ConnectionManager(object):
_manager = None
@classmethod
def init(cls):
if not cls._manager:
cls._manager = ConnectionManager()
return cls._manager
def __init__(self):
self.mptn_lock = RLock()
self.mptn_conn_by_addr = {} # map ID to a Peer (address, socket)
self.mptn_conn_by_id = collections.defaultdict(list)
self.nonce_lock = RLock()
self.nonce_cache = {} # map nonce to a NonceCallback (ID, listener) or map ID to a list of nonces [nonce, ...]
def __repr__(self):
return "ConnectionManager:\n\tconnections:\n\t\t%s\n\tnonces:\n\t\t%s" % (
str(self.mptn_conn_by_addr), str(self.nonce_cache))
def add_peer(self, address, peer):
with self.mptn_lock:
logger.debug("add_peer add address %s peer %s" % (str(address), str(peer)))
self.remove_peer(address)
self.mptn_conn_by_addr[address] = peer
self.mptn_conn_by_id[peer.id].append(peer)
def remove_peer(self, address):
with self.mptn_lock:
if address is None or address not in self.mptn_conn_by_addr:
# logger.info("remove_peer not find address %s. Continue, anyway" % str(address))
return
try:
peer = self.mptn_conn_by_addr.pop(address)
self.mptn_conn_by_id[peer.id] = [p for p in self.mptn_conn_by_id.pop(peer.id) if p.address != peer.address]
peer.socket.close()
except Exception as e:
logger.error("remove_peer socket closed for ID=%s address %s occurs error=%s\n%s" % (ID_TO_STRING(peer.id), str(address), str(e), traceback.format_exc()))
logger.info("remove_peer done for ID=%s address %s" % (ID_TO_STRING(peer.id), str(address)))
def get_peer_by_id(self, mptn_id):
with self.mptn_lock:
p_list = self.mptn_conn_by_id.get(mptn_id)
if p_list is None or len(p_list) == 0: return None
return p_list[0].socket
def add_nonce(self, nonce, nncb):
with self.nonce_lock:
old_nncb = self.pop_nonce(nonce)
if old_nncb is not None:
logger.error("nonce collision occurs. nonce of ID %s's replaces that of old ID %s's" % (ID_TO_STRING(nncb.id), ID_TO_STRING(old_nncb.id)))
old_nncb.callback.set(None)
del old_nncb
self.nonce_cache[nonce] = (nncb, int(round(time.time()*1000))+10000)
self.remove_timeout_nonce()
def pop_nonce(self, nonce):
with self.nonce_lock:
if nonce not in self.nonce_cache: return None
self.remove_timeout_nonce()
return self.nonce_cache.pop(nonce)[0]
def remove_timeout_nonce(self):
with self.nonce_lock:
for nonce, t in self.nonce_cache.items():
if t[1] < int(round(time.time() * 1000)):
t[0].callback.set(None)
self.nonce_cache.pop(nonce)
def handle_reply_message(context, dest_id, src_id, msg_type, payload):
nncb = ConnectionManager.init().pop_nonce(context.nonce)
if nncb is None:
logger.error("handle_reply_message no nonce %s exists" % str(map(ord, context.nonce)))
return
if nncb.id != src_id:
logger.error("handle_reply_message ID %s 0x%X not match nonce %s callback ID %s 0x%X" % (ID_TO_STRING(src_id), src_id, str(map(ord, context.nonce)), ID_TO_STRING(nncb.id), nncb.id))
return
context.id = src_id
nncb.callback.set((dest_id, src_id, msg_type, payload))
def handle_socket(sock, addr):
# logger.debug("handle_socket serve sock %s and addr %s" % (str(sock), str(addr)))
peer_id_string = ""
peer_id = MPTN_MAX_ID
try:
peer_id_string = sock.recv(MPTN_ID_LEN)
if peer_id_string == "":
            sock.close()
            return
peer_id = socket.ntohl(struct.unpack("!L", peer_id_string)[0])
if peer_id != MPTN_MAX_ID:
ConnectionManager.init().add_peer(addr, Peer(socket=sock, id=peer_id, address=addr))
except Exception as e:
logger.error("handle_socket peer_id_string:%s peer_id:%s error=%s\n%s" % (str(e), peer_id_string, str(ID_TO_STRING(peer_id)), traceback.format_exc()))
socket_recv(sock, addr, peer_id)
def socket_recv(sock, addr, peer_id):
while True:
nonce = ""
size_string = ""
size = 0
message = ""
try:
nonce = sock.recv(MPTN_TCP_NONCE_SIZE)
if nonce == "":
logger.error("handle_socket closed. nonce. addr=%s" % (str(addr)))
logger.debug("handle_socket closed. connections before %s" % str(ConnectionManager.init()))
ConnectionManager.init().remove_peer(addr)
logger.debug("handle_socket closed. connections after %s" % str(ConnectionManager.init()))
return
size_string = sock.recv(MPTN_TCP_PACKET_SIZE)
if size_string == "":
logger.error("handle_socket closed. size. addr=%s, nonce=%s" % (str(addr), str(map(ord, nonce))))
logger.debug("handle_socket closed. connections before %s" % str(ConnectionManager.init()))
ConnectionManager.init().remove_peer(addr)
logger.debug("handle_socket closed. connections after %s" % str(ConnectionManager.init()))
return
size = socket.ntohl(struct.unpack("!L", size_string)[0])
while len(message) < size:
part_msg = sock.recv(size - len(message))
if part_msg == "":
logger.error("handle_socket closed. message. addr=%s, nonce=%s, size_string=%s, size=%d, message=\n%s" % (
str(addr), str(map(ord, nonce)), str(map(ord, size_string)), size,
str(formatted_print(split_packet_to_list(message)))
)
)
logger.debug("handle_socket closed. connections before %s" % str(ConnectionManager.init()))
ConnectionManager.init().remove_peer(addr)
logger.debug("handle_socket closed. connections after %s" % str(ConnectionManager.init()))
return
message += part_msg
# logger.debug("handle_socket receive message from addr %s" % str(addr))
context = Context(peer_id, addr, ONLY_FROM_TCP_SERVER, sock, nonce)
process_message_handler(context, message)
            # process_message_handler may modify context.id
if peer_id == MPTN_MAX_ID and context.id != MPTN_MAX_ID:
ConnectionManager.init().add_peer(addr, Peer(socket=sock, id=context.id, address=addr))
peer_id = context.id
logger.debug("handle_socket update context id %s 0x%X from addr %s"%(ID_TO_STRING(context.id), context.id, str(addr)))
except Exception as e:
logger.error("handle_socket addr=%s, nonce=%s, size_string=%s, size=%d, message is \n%s\nerror=%s\n%s" % (str(addr),
str(map(ord, nonce)), str(map(ord, size_string)), size,
str(formatted_print(split_packet_to_list(message))), str(e),
traceback.format_exc()
)
)
logger.debug("handle_socket error before %s" % str(ConnectionManager.init()))
ConnectionManager.init().remove_peer(addr)
logger.debug("handle_socket error after %s" % str(ConnectionManager.init()))
return
gevent.sleep(0)
def reconnect(address):
sock = None
for i in xrange(CONNECTION_RETRIES):
try:
sock = socket.create_connection(address)#, NETWORK_TIMEOUT)
return sock
except IOError as e:
logger.error('reconnect to %s with error=%s' % (str(address), str(e)))
# gevent.sleep(3)
else:
if sock is not None: sock.close()
return None
def socket_send(context, dest_id, message, expect_reply=False):
if dest_id is None:
logger.error("socket_send dest ID %s is not valid"%ID_TO_STRING(dest_id))
return None
if context is not None and context.direction == ONLY_FROM_TCP_SERVER and context.id == dest_id:
# logger.debug("socket_send reuses socket since context ID is the same as dest_id %s" % str(dest_id))
next_hop_id = dest_id
address = context.address
sock = context.socket
nonce = context.nonce
else:
next_hop = find_nexthop_for_id(dest_id)
# logger.debug("socket_send next_hop is %s" % str(next_hop))
if next_hop is None:
logger.error("socket_send next hop for dest ID %s 0x%X cannot be found"%(ID_TO_STRING(dest_id), dest_id))
return None
next_hop_id = next_hop.id
address = next_hop.tcp_address
sock = ConnectionManager.init().get_peer_by_id(next_hop_id)
nonce = os.urandom(MPTN_TCP_NONCE_SIZE)
if sock is None:
# logger.debug("socket_send no socket found for ID %s"%ID_TO_STRING(next_hop_id))
sock = reconnect(address)
if sock is None:
# logger.error("socket_send cannot re-setup socket for next_hop_id=%s addr=%s msg is\n%s" % (ID_TO_STRING(next_hop_id), str(address), formatted_print(split_packet_to_list(message))))
return
try:
sock.send(self_id_net_endian_string)
except Exception as e:
logger.error("socket_send self_id_net_endian_string error=%s. addr=%s, self_id_net_endian_string=%s, nonce=%s, message is\n%s\nerror=%s\n%s" % (str(address), ID_TO_STRING(self_id_net_endian_string),
str(map(ord, nonce)),
str(formatted_print(split_packet_to_list(message))),
str(e), traceback.format_exc()
)
)
return
gevent.spawn(socket_recv, sock, address, next_hop_id)
ConnectionManager.init().add_peer(address, Peer(socket=sock, id=next_hop_id, address=address))
gevent.sleep(0)
# logger.debug("socket_send message %s to ID %s" % (str(message), ID_TO_STRING(next_hop_id)))
size = 0
try:
sock.send(nonce)
size = struct.pack("!L",socket.htonl(len(message)))
sock.send(size)
sock.sendall(message)
except Exception as e:
logger.error("socket_send nonce addr=%s, self_id_net_endian_string=%s, nonce=%s, message is\n%s\nerror=%s\n%s" % (str(address),
ID_TO_STRING(self_id_net_endian_string),
str(map(ord, nonce)),
str(formatted_print(split_packet_to_list(message))),
str(e), traceback.format_exc()
)
)
ConnectionManager.init().remove_peer(address)
return None
if not expect_reply: return None
callback = AsyncResult()
ConnectionManager.init().add_nonce(nonce, NonceCallback(dest_id, callback))
if context is None:
while not callback.ready():
gevent.sleep(0)
return callback.get()
# '''
# DBDict class: a DB on disk with a dictionary interface
# From mail.python.org/pipermail/python-list
# '''
# try:
# import cPickle as pickle
# except:
# import pickle
# import os, os.path
# import UserDict
# import sqlite3
# def to_db_type(value):
# """
# If value's type is supported natively in SQLite, return value.
# Otherwise, return a pickled representation.
# """
# if value is None or isinstance(value, (int, long, float, basestring)):
# return value
# else:
# return buffer(pickle.dumps(value))
# def from_db_type(value):
# """
# Converts a value from the database to a Python object.
# """
# if isinstance(value, buffer):
# return pickle.loads(str(value))
# else:
# return value
# class DBDict(UserDict.DictMixin):
# """
# Shelf implementation using an SQLite3 database.
# """
# def __init__(self, filename):
# if not os.path.isfile(filename):
# self._database = sqlite3.connect(filename)
# self._database.execute("CREATE TABLE IF NOT EXISTS Shelf "
# "(Key TEXT PRIMARY KEY NOT NULL, Value BLOB)")
# else:
# self._database = sqlite3.connect(filename)
# self._database.text_factory = str
# self._open = True
# def __del__(self):
# self.close()
# def __getitem__(self, key):
# row = self._database.execute("SELECT Value FROM Shelf WHERE Key=?",[key]).fetchone()
# if row:
# return from_db_type(row[0])
# else:
# raise KeyError(key)
# def __setitem__(self, key, value):
# self._database.execute("INSERT OR REPLACE INTO Shelf VALUES (?, ?)",[key, to_db_type(value)])
# self._database.commit()
# def __delitem__(self, key):
# if not self._database.execute("SELECT Key FROM Shelf WHERE Key=?",[key]).fetchone():
# raise KeyError(key)
# self._database.execute("DELETE FROM Shelf WHERE Key=?", [key])
# self._database.commit()
# def keys(self):
# """Return a list of keys in the shelf."""
# return [row[0] for row in self._database.execute("SELECT Key FROM Shelf")]
# def close(self):
# """Commit changes and close the file."""
# if self._database is not None:
# self._database.commit()
# self._database.close()
# self._database = None
'''
DBDict class: a Json file on disk with a dictionary interface
Note that Json only has string keys
'''
import os, os.path
import UserDict
import json
# http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python
def _json_load_byteified(file_handle):
return _byteify(
json.load(file_handle, object_hook=_byteify),
ignore_dicts=True
)
def _json_loads_byteified(json_text):
return _byteify(
json.loads(json_text, object_hook=_byteify),
ignore_dicts=True
)
def _byteify(data, ignore_dicts = False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [ _byteify(item, ignore_dicts=True) for item in data ]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
# if it's anything else, return it in its original form
return data
def to_db_type(value):
if value is None or isinstance(value, (int, long, float, basestring)):
return value
elif isinstance(value, list):
return [ to_db_type(item) for item in value ]
elif isinstance(value, tuple):
if type(value) is not tuple: # namedtuple
d = value._asdict()
d["__type_namedtuple__"] = value.__class__.__name__
return to_db_type(d)
else: # tuple
l = [ to_db_type(item) for item in value ]
l.append("__type_tuple__")
return l
elif isinstance(value, dict):
return { k: to_db_type(v) for k, v in value.iteritems() }
else:
return to_db_type(value.__dict__)
def from_db_type(value):
if isinstance(value, dict):
if "__type_namedtuple__" in value:
class_name = value.pop("__type_namedtuple__")
value = { k: from_db_type(v) for k, v in value.iteritems() }
return collections.namedtuple(class_name, value.keys())(**value)
else:
return { k: from_db_type(v) for k, v in value.iteritems() }
elif isinstance(value, list):
if value[-1] == "__type_tuple__":
value.pop()
return tuple([ from_db_type(item) for item in value ])
else:
return [ from_db_type(item) for item in value ]
else:
return value
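# Illustrative round trip (the NextHop values below are made up; NextHop itself is the
# namedtuple defined near the top of this module):
#   to_db_type(NextHop(id=1, tcp_address=("10.0.0.1", 9000)))
#     -> {'id': 1, 'tcp_address': ['10.0.0.1', 9000, '__type_tuple__'],
#         '__type_namedtuple__': 'NextHop'}
#   from_db_type(...) reverses the encoding, rebuilding the tuple and the namedtuple type.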
class DBDict(UserDict.DictMixin):
def __init__(self, filename):
if not os.path.isfile(filename):
with open(filename, "w") as f:
json.dump({}, f, sort_keys=True,indent=2)
self._filename = filename
def __del__(self):
        pass  # nothing to clean up; every write already persists the dict to disk
def __getitem__(self, key):
return self.__getdict__()[str(key)]
def __setitem__(self, key, value):
lookup = self.__getdict__()
lookup[str(key)] = value
self.__setdict__(lookup)
def __delitem__(self, key):
lookup = self.__getdict__()
del lookup[str(key)]
self.__setdict__(lookup)
def __getdict__(self):
with open(self._filename, "r") as f:
obj = _json_load_byteified(f)
return {key:from_db_type(value) for (key,value) in obj.iteritems()}
def __setdict__(self, lookup):
with open(self._filename, 'w') as f:
json.dump({key:to_db_type(value) for (key,value) in lookup.iteritems()}, f, sort_keys=True,indent=2)
def keys(self):
lookup = self.__getdict__()
return lookup.keys()
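# Minimal usage sketch (the filename is arbitrary; keys are stringified because JSON
# objects only allow string keys, so db[42] and db["42"] refer to the same entry):
#   db = DBDict("routing_table.json")
#   db[42] = NextHop(id=42, tcp_address=("10.0.0.1", 9000))
#   print db[42].tcp_address
#   del db[42]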
|
136265
|
import pytest
import jax.random as jr
import jax.numpy as np
from jax import jit
import numpy as onp
from ssm.factorial_hmm import NormalFactorialHMM
SEED = jr.PRNGKey(0)
@jit
def identity(x):
return x
#### TESTS
def test_normal_factorial_hmm_jit():
fhmm = NormalFactorialHMM(num_states=(3, 4), seed=SEED)
identity(fhmm)
def test_normal_factorial_hmm_sample():
rng1, rng2 = jr.split(SEED, 2)
fhmm = NormalFactorialHMM(num_states=(3, 4), seed=SEED)
states, data = fhmm.sample(rng2, num_steps=10, num_samples=32)
assert len(states) == 2
assert states[0].shape == (32, 10)
assert states[0].min() >= 0 and states[0].max() < 3
assert states[1].shape == (32, 10)
    assert states[1].min() >= 0 and states[1].max() < 4
assert data.shape == (32, 10)
def test_normal_factorial_hmm_sample_is_consistent():
# sampled from FHMM previously
true_states_var0 = np.array([[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 0, 0, 0, 0, 0, 0, 0, 1, 1]], dtype=np.int32)
true_states_var1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 2, 2, 2]], dtype=np.int32)
true_data = np.array([[ 0.6167348 , 0.64424795, 0.61341816, 0.591818 ,
0.6095528 , 0.62132317, 0.61306655, 0.6258184 ,
0.6209738 , 0.6320318 ],
[ 0.61753726, 0.08578929, 0.09964492, 0.08741929,
0.06252673, -0.29311082, -0.28715354, 0.4724555 ,
-2.3517041 , -2.3381226 ]],
dtype=np.float32)
rng1, rng2 = jr.split(SEED, 2)
fhmm = NormalFactorialHMM(num_states=(3, 4), seed=rng1)
states, data = fhmm.sample(rng2, num_steps=10, num_samples=2)
assert np.array_equal(states[0], true_states_var0)
assert np.array_equal(states[1], true_states_var1)
assert np.allclose(true_data, data, atol=1e-5)
def test_normal_factorial_hmm_em_fit():
rng1, rng2, rng3 = jr.split(SEED, 3)
true_fhmm = NormalFactorialHMM(num_states=(3, 4), seed=rng1)
states, data = true_fhmm.sample(rng2, num_steps=100, num_samples=3)
test_fhmm = NormalFactorialHMM(num_states=(3, 4), seed=rng3)
# fit with no early stopping
lp, fitted_fhmm, posteriors = test_fhmm.fit(data, num_iters=100, tol=-1)
# some simple tests
assert not np.any(np.isnan(lp))
assert posteriors.expected_states.shape == (3, 100, 3, 4)
|
136291
|
def define_display_nodes(tree,nodemap,unscoped_vectors=False,looped_definition=False):
if unscoped_vectors:
s = ''
else:
s = '\t\t\tstd::vector<MPILib::NodeId> display_nodes;\n'
display_nodes = tree.findall('.//Display')
for dn in display_nodes:
node_id = str(nodemap[dn.attrib['node']])
if looped_definition:
node_id = str(len(nodemap))+'*i+'+node_id
s += '\t\t\tdisplay_nodes.push_back('+ node_id + ');\n'
s += '\n'
return s
def define_rate_nodes(tree, nodemap,unscoped_vectors=False,looped_definition=False):
if unscoped_vectors:
s = ''
else:
s = '\t\t\tstd::vector<MPILib::NodeId> rate_nodes;\n'
s += '\t\t\tstd::vector<MPILib::Time> rate_node_intervals;\n'
rate_nodes = tree.findall('.//Rate')
for rn in rate_nodes:
node_id = str(nodemap[rn.attrib['node']])
if looped_definition:
node_id = str(len(nodemap))+'*i+'+node_id
t_interval = rn.attrib['t_interval']
s += '\t\t\trate_nodes.push_back('+ node_id + ');\n'
s += '\t\t\trate_node_intervals.push_back('+ t_interval + ');\n'
s += '\n'
return s
def define_density_nodes(tree,nodemap,unscoped_vectors=False,looped_definition=False):
if unscoped_vectors:
s = ''
else:
s = '\t\t\tstd::vector<MPILib::NodeId> density_nodes;\n'
s += '\t\t\tstd::vector<MPILib::Time> density_node_start_times;\n'
s += '\t\t\tstd::vector<MPILib::Time> density_node_end_times;\n'
s += '\t\t\tstd::vector<MPILib::Time> density_node_intervals;\n'
density_nodes = tree.findall('.//Density')
for dn in density_nodes:
node_id = str(nodemap[dn.attrib['node']])
if looped_definition:
node_id = str(len(nodemap))+'*i+'+node_id
t_start = dn.attrib['t_start']
t_end = dn.attrib['t_end']
t_interval = dn.attrib['t_interval']
s += '\t\t\tdensity_nodes.push_back('+ node_id + ');\n'
s += '\t\t\tdensity_node_start_times.push_back('+ t_start + ');\n'
s += '\t\t\tdensity_node_end_times.push_back('+ t_end + ');\n'
s += '\t\t\tdensity_node_intervals.push_back('+ t_interval + ');\n'
s += '\n'
return s
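# Illustrative sketch of the expected XML and generated C++ (element/attribute names are
# taken from the findall/attrib lookups above; the node name 'E' and nodemap are made up):
#   <Display node="E"/> with nodemap = {'E': 0} makes define_display_nodes emit, roughly:
#       std::vector<MPILib::NodeId> display_nodes;
#       display_nodes.push_back(0);
#   With looped_definition=True the id becomes "1*i+0" (len(nodemap) times the loop
#   variable i plus the node id), so the block can be instantiated once per iteration.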
|
136335
|
from IMDBDatabase import IMDBData
# GUIDED PRACTICE
# Challenge 1.1 - The first step to data analysis is always to understand the
# database. Just like you can use a for loop to print all the elements in a list,
# use a for loop to print all the movieNames in IMDBData.
for movieName in IMDBData:
print(movieName)
# GUIDED PRACTICE
# Challenge 1.2 - Since IMDBData is a dictionary, you can access the data about
# a particular movie with IMDBData["Zootopia"]. Since each movie has a lot of
# data about it, it is also a dictionary. As you did in Challenge 1.1, use a
# for loop to print out all the characteristics that this database stores about
# a particular movie.
#
# Hint: Change the dictionary you are looping over in Challenge 1.1 to instead loop
# over the dictionary IMDBData["Zootopia"]
for attribute in IMDBData["Zootopia"]:
print(attribute)
# GUIDED PRACTICE
# Challenge 1.3 - Great, we now have an understanding of the data that is stored
# in the database! Let's see what that data is. For any one movie in the database,
# print out its stars, rating, genre, and year. An example of getting
# Zootopia's stars is below:
#
# print(IMDBData["Zootopia"]["Stars"])
print(IMDBData["Zootopia"]["Stars"])
print(IMDBData["Zootopia"]["Rating"])
print(IMDBData["Zootopia"]["Genre"])
print(IMDBData["Zootopia"]["Year"])
# GUIDED PRACTICE
# Challenge 1.4 - Now that you understand how the database is structured, let's
# look at the actual database! Open IMDBDatabase.py (NOT .pyc), and look at the
# information stored in the database and how it is structured.
# GUIDED PRACTICE
# Challenge 1.5 - Now let's start answering some questions about these movies.
# For starters, let us determine the highest rated movie in this database. Write
# a loop that goes over all the movies in the database, gets its rating,
# and prints the name and rating of the highest rated movie. Check your answer
# by looking at the actual database.
#
# Hint: You will need to have two variables, maxRating and maxRatedMovie,
# that keep track of the highest rated movie you have seen so far.
maxRating, maxRatedMovie = 0, ""
for movieName in IMDBData:
rating = IMDBData[movieName]["Rating"]
if (rating > maxRating):
maxRating = rating
maxRatedMovie = movieName
print(maxRatedMovie, " is the highest rated movie in the database, with rating ", maxRating)
# Challenge 1.6 - Now let's find the oldest movie in the database. Write a loop
# that goes over every movie in the database, get its year, and ends up printing
# the name and year of the oldest movie. Check your answer by looking at the
# actual database.
#
# Hint: Like in Challenge 1.5, you will have to maintain two variables as you
# go through the loop. But this time, they will keep track of the oldestYear you
# have seen so far, and the name of the oldestMovie.
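# One possible solution sketch for Challenge 1.6 (assumes the "Year" attribute is stored
# as a number, so the oldest movie is the one with the smallest year):
oldestYear, oldestMovie = 10000, ""
for movieName in IMDBData:
    year = IMDBData[movieName]["Year"]
    if (year < oldestYear):
        oldestYear = year
        oldestMovie = movieName
print(oldestMovie, " is the oldest movie in the database, released in ", oldestYear)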
# Challenge 1.7 - Now let's find the number of Animation movies in the database.
# Write a loop that goes over every movie in the database, get its genre, and
# ends up printing the number of Animation movies. Check your answer by
# looking at the actual database.
#
# Hint: This time, you will have to maintain one variable, which represents the
# number of Animation movies you have seen so far.
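# One possible solution sketch for Challenge 1.7 (assumes the "Genre" attribute is a single
# string such as "Animation"; if it is a list, use membership instead, e.g.
# "Animation" in IMDBData[movieName]["Genre"]):
numAnimationMovies = 0
for movieName in IMDBData:
    if (IMDBData[movieName]["Genre"] == "Animation"):
        numAnimationMovies = numAnimationMovies + 1
print("There are ", numAnimationMovies, " Animation movies in the database")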
# BONUS Challenge 1.8 - As you saw above, the "Stars" attribute of a movie is a
# list of strings. It contains the names of the actors/actresses in the movie.
# Write a loop that determines how many of the movies in this database "<NAME>"
# has acted in. Check your answer by looking at the actual database.
#
# Hint 1: Like in Challenge 1.5, you will have to loop over the list and have an
# if statement in the loop. But this time, your if statement wants to check
# whether the string "<NAME>" is in movieName's stars list.
#
# Hint 2: In Challenge 1.5, you had to keep one variable that keeps track of the
# highest rated movie you have seen so far. Similarly, in this question you
# will have to maintain one variable that keeps track of the number of <NAME>
# movies you have encountered so far in your loop.
# BONUS Challenge 1.9 - In Challenge 1.8, you wrote code to determine the number
# of <NAME> movies in the database. Now, modify it so that you can type a name in,
# and it will tell you the number of movies by that actor/actress in the database.
# Check your answer by looking at the actual database.
#
# Hint: Remember input()?
|
136336
|
import os
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torchvision
from matplotlib import rc
from piq import psnr, ssim
from data_management import (
CropOrPadAndResimulate,
Flatten,
Normalize,
RandomMaskDataset,
filter_acquisition_no_fs,
)
from find_adversarial import err_measure_l2, grid_attack
from operators import rotate_real, to_complex
# ----- load configuration -----
import config # isort:skip
import config_robustness as cfg_rob # isort:skip
from config_robustness import methods # isort:skip
# ------ general setup ----------
device = cfg_rob.device
save_path = os.path.join(config.RESULTS_PATH, "attacks")
save_results = os.path.join(save_path, "challenge_example_V0S15_adv.pkl")
do_plot = True
save_plot = True
# ----- attack setup -----
# select samples
sample_vol = 0
sample_sl = 15
it_init = 6
keep_init = 3
# select range relative noise
noise_rel = torch.tensor([0.00, 0.025]).float().unique(sorted=True)
print(noise_rel)
# select measure for reconstruction error
err_measure = err_measure_l2
# select reconstruction methods
methods_include = ["L1", "Tiramisu jit"]
methods = methods.loc[methods_include]
# select methods excluded from (re-)performing attacks
methods_no_calc = ["L1", "Tiramisu jit"]
# adjust contrast
v_min = 0.05
v_max = 4.5
# ----- perform attack -----
# load data and select sample
test_data_params = {
"mask_func": cfg_rob.mask_func,
"filter": [filter_acquisition_no_fs],
"num_sym_slices": 0,
"multi_slice_gt": False,
"simulate_gt": True,
"keep_mask_as_func": True,
"transform": torchvision.transforms.Compose(
[
CropOrPadAndResimulate((368, 368)),
Flatten(0, -3),
Normalize(reduction="mean", use_target=False),
],
),
}
test_data = RandomMaskDataset
test_data = test_data("val", **test_data_params)
lo, hi = test_data.get_slices_in_volume(sample_vol)
print(
"volume slices from {} to {}, selected {}".format(lo, hi, lo + sample_sl)
)
X_VOL = to_complex(
torch.stack([test_data[sl_idx][2] for sl_idx in range(lo, hi)], dim=0)
).to(device)
X_MAX = rotate_real(X_VOL)[:, 0:1, ...].max().cpu()
X_0 = to_complex(test_data[lo + sample_sl][2].to(device)).unsqueeze(0)
X_0 = X_0.repeat(it_init, *((X_0.ndim - 1) * (1,)))
Y_0 = cfg_rob.OpA(X_0)
# create result table and load existing results from file
results = pd.DataFrame(
columns=[
"name",
"X_adv_err",
"X_ref_err",
"X_adv_psnr",
"X_ref_psnr",
"X_adv_ssim",
"X_ref_ssim",
"X_adv",
"X_ref",
"Y_adv",
"Y_ref",
]
)
results.name = methods.index
results = results.set_index("name")
# load existing results from file
if os.path.isfile(save_results):
results_save = pd.read_pickle(save_results)
for idx in results_save.index:
if idx in results.index:
results.loc[idx] = results_save.loc[idx]
else:
results_save = results
# perform attacks
for (idx, method) in methods.iterrows():
if idx not in methods_no_calc:
(
results.loc[idx].X_adv_err,
results.loc[idx].X_ref_err,
results.loc[idx].X_adv,
results.loc[idx].X_ref,
results.loc[idx].Y_adv,
results.loc[idx].Y_ref,
) = grid_attack(
method,
noise_rel,
X_0,
Y_0,
store_data=True,
keep_init=keep_init,
err_measure=err_measure,
)
results.loc[idx].X_adv_psnr = torch.zeros(len(noise_rel), X_0.shape[0])
results.loc[idx].X_ref_psnr = torch.zeros(len(noise_rel), X_0.shape[0])
results.loc[idx].X_adv_ssim = torch.zeros(len(noise_rel), X_0.shape[0])
results.loc[idx].X_ref_ssim = torch.zeros(len(noise_rel), X_0.shape[0])
for idx_noise in range(len(noise_rel)):
results.loc[idx].X_adv_psnr[idx_noise, ...] = psnr(
torch.clamp(
rotate_real(results.loc[idx].X_adv[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
reduction="none",
)
results.loc[idx].X_ref_psnr[idx_noise, ...] = psnr(
torch.clamp(
rotate_real(results.loc[idx].X_ref[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
reduction="none",
)
results.loc[idx].X_adv_ssim[idx_noise, ...] = ssim(
torch.clamp(
rotate_real(results.loc[idx].X_adv[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
size_average=False,
)
results.loc[idx].X_ref_ssim[idx_noise, ...] = ssim(
torch.clamp(
rotate_real(results.loc[idx].X_ref[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
size_average=False,
)
# save results
for idx in results.index:
results_save.loc[idx] = results.loc[idx]
os.makedirs(save_path, exist_ok=True)
results_save.to_pickle(save_results)
# select the worst example for each noise level and method (rel err)
results_max = pd.DataFrame(
columns=["name", "X_adv_err", "X_adv_psnr", "X_adv_ssim", "X_adv", "Y_adv"]
)
results_max.name = methods.index
results_max = results_max.set_index("name")
for (idx, method) in methods.iterrows():
_, idx_adv = results.loc[idx].X_adv_err.max(dim=1)
idx_noise = range(len(noise_rel))
results_max.loc[idx].X_adv_err = results.loc[idx].X_adv_err[
idx_noise, idx_adv, ...
]
results_max.loc[idx].X_adv_psnr = results.loc[idx].X_adv_psnr[
idx_noise, idx_adv, ...
]
results_max.loc[idx].X_adv_ssim = results.loc[idx].X_adv_ssim[
idx_noise, idx_adv, ...
]
results_max.loc[idx].X_adv = results.loc[idx].X_adv[
idx_noise, idx_adv, ...
]
results_max.loc[idx].Y_adv = results.loc[idx].Y_adv[
idx_noise, idx_adv, ...
]
# ----- plotting -----
def _implot(sub, im, vmin=v_min, vmax=v_max):
if im.shape[-3] == 2: # complex image
image = sub.imshow(
torch.sqrt(im.pow(2).sum(-3))[0, :, :].detach().cpu(),
vmin=vmin,
vmax=vmax,
)
else: # real image
image = sub.imshow(im[0, 0, :, :].detach().cpu(), vmin=vmin, vmax=vmax)
image.set_cmap("gray")
sub.set_xticks([])
sub.set_yticks([])
return image
if do_plot:
# LaTeX typesetting
rc("font", **{"family": "serif", "serif": ["Palatino"]})
rc("text", usetex=True)
X_0 = X_0.cpu()
Y_0 = Y_0.cpu()
# +++ ground truth +++
fig, ax = plt.subplots(clear=True, figsize=(2.5, 2.5), dpi=200)
im = _implot(ax, X_0)
if save_plot:
fig.savefig(
os.path.join(
save_path,
"fig_example_challenge_V{}S{}_adv_gt.pdf".format(
sample_vol, sample_sl
),
),
bbox_inches="tight",
pad_inches=0,
)
# method-wise plots
for (idx, method) in methods.iterrows():
# +++ reconstructions per noise level +++
for idx_noise in range(len(noise_rel)):
results_ref = results_max
X_cur = results_ref.loc[idx].X_adv[idx_noise, ...].unsqueeze(0)
# adv
fig, ax = plt.subplots(clear=True, figsize=(2.5, 2.5), dpi=200)
im = _implot(ax, X_cur)
ax.text(
360,
10,
"rel.~$\\ell_2$-err: {:.2f}\\%".format(
results_ref.loc[idx].X_adv_err[idx_noise].item() * 100
),
fontsize=10,
color="white",
horizontalalignment="right",
verticalalignment="top",
)
ax.text(
360,
30,
"PSNR: {:.2f}".format(
results_ref.loc[idx].X_adv_psnr[idx_noise].item()
),
fontsize=10,
color="white",
horizontalalignment="right",
verticalalignment="top",
)
ax.text(
360,
53,
"SSIM: {:.2f}".format(
results_ref.loc[idx].X_adv_ssim[idx_noise].item()
),
fontsize=10,
color="white",
horizontalalignment="right",
verticalalignment="top",
)
if save_plot:
fig.savefig(
os.path.join(
save_path,
"fig_example_challenge_V{}S{}_adv_".format(
sample_vol, sample_sl
)
+ method.info["name_save"]
+ "_{:.0e}".format(noise_rel[idx_noise].item())
+ ".pdf",
),
bbox_inches="tight",
pad_inches=0,
)
# not saved
fig.suptitle(
method.info["name_disp"]
+ " for rel. noise level = {:1.3f}".format(
noise_rel[idx_noise].item()
)
)
plt.show()
|
136339
|
from django.urls import path
from .views import index
app_name = "app"
urlpatterns = [
path("", index, name="index"),
]
|
136347
|
from .basic import BasicLoginHandler
from .certificate import CertificateLoginHandler
LOGIN_HANDLERS = [BasicLoginHandler, CertificateLoginHandler]
|
136379
|
from lenstronomy.GalKin.numeric_kinematics import NumericKinematics
from lenstronomy.GalKin.analytic_kinematics import AnalyticKinematics
__all__ = ['GalkinModel']
class GalkinModel(object):
"""
this class handles all the kinematic modeling aspects of Galkin
Excluded are observational conditions (seeing, aperture etc)
Major class to compute velocity dispersion measurements given light and mass models
    The class supports any mass and light distribution (and superposition thereof) that has a 3d correspondence in its
2d lens model distribution. For models that do not have this correspondence, you may want to apply a
Multi-Gaussian Expansion (MGE) on their models and use the MGE to be de-projected to 3d.
The computation follows Mamon&Lokas 2005.
The class supports various types of anisotropy models (see Anisotropy class).
Solving the Jeans Equation requires a numerical integral over the 3d light and mass profile (see Mamon&Lokas 2005).
This class (as well as the dedicated LightModel and MassModel classes) perform those integral numerically with an
interpolated grid.
    The cosmology assumed to compute the physical mass and distances is set via the kwargs_cosmo keyword arguments.
d_d: Angular diameter distance to the deflector (in Mpc)
d_s: Angular diameter distance to the source (in Mpc)
d_ds: Angular diameter distance from the deflector to the source (in Mpc)
The numerical options can be chosen through the kwargs_numerics keywords
interpol_grid_num: number of interpolation points in the light and mass profile (radially). This number should
be chosen high enough to accurately describe the true light profile underneath.
log_integration: bool, if True, performs the interpolation and numerical integration in log-scale.
max_integrate: maximum 3d radius to where the numerical integration of the Jeans Equation solver is made.
This value should be large enough to contain most of the light and to lead to a converged result.
min_integrate: minimal integration value. This value should be very close to zero but some mass and light
profiles are diverging and a numerically stable value should be chosen.
These numerical options should be chosen to allow for a converged result (within your tolerance) but not too
conservative to impact too much the computational cost. Reasonable values might depend on the specific problem.
"""
def __init__(self, kwargs_model, kwargs_cosmo, kwargs_numerics=None, analytic_kinematics=False):
"""
:param kwargs_model: keyword arguments describing the model components
:param kwargs_cosmo: keyword arguments that define the cosmology in terms of the angular diameter distances involved
:param kwargs_numerics: numerics keyword arguments
:param analytic_kinematics: bool, if True uses the analytic kinematic model
"""
if kwargs_numerics is None:
kwargs_numerics = {'interpol_grid_num': 200, # numerical interpolation, should converge -> infinity
'log_integration': True,
# log or linear interpolation of surface brightness and mass models
'max_integrate': 100,
'min_integrate': 0.001} # lower/upper bound of numerical integrals
if analytic_kinematics is True:
anisotropy_model = kwargs_model.get('anisotropy_model')
if not anisotropy_model == 'OM':
raise ValueError('analytic kinematics only available for OsipkovMerritt ("OM") anisotropy model.')
self.numerics = AnalyticKinematics(kwargs_cosmo=kwargs_cosmo, **kwargs_numerics)
else:
self.numerics = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics)
self._analytic_kinematics = analytic_kinematics
def check_df(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy):
"""
checks whether the phase space distribution function of a given anisotropy model is positive.
Currently this is implemented by the relation provided by Ciotti and Morganti 2010 equation (10)
https://arxiv.org/pdf/1006.2344.pdf
:param r: 3d radius to check slope-anisotropy constraint
:param theta_E: Einstein radius in arc seconds
:param gamma: power-law slope
:param a_ani: scaled transition radius of the OM anisotropy distribution
:param r_eff: half-light radius in arc seconds
:return: equation (10) >= 0 for physical interpretation
"""
dr = 0.01 # finite differential in radial direction
r_dr = r + dr
sigmar2 = self.numerics.sigma_r2(r, kwargs_mass, kwargs_light, kwargs_anisotropy)
sigmar2_dr = self.numerics.sigma_r2(r_dr, kwargs_mass, kwargs_light, kwargs_anisotropy)
grav_pot = self.numerics.grav_potential(r, kwargs_mass)
grav_pot_dr = self.numerics.grav_potential(r_dr, kwargs_mass)
self.numerics.delete_cache()
return r * (sigmar2_dr - sigmar2 - grav_pot + grav_pot_dr) / dr
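# Illustrative construction sketch (the numerics values simply restate the defaults used in
# __init__ above; kwargs_model and the distances in kwargs_cosmo depend on the actual lens
# and light models and are only placeholders here):
#   kwargs_numerics = {'interpol_grid_num': 200, 'log_integration': True,
#                      'max_integrate': 100, 'min_integrate': 0.001}
#   galkin = GalkinModel(kwargs_model, kwargs_cosmo={'d_d': ..., 'd_s': ..., 'd_ds': ...},
#                        kwargs_numerics=kwargs_numerics, analytic_kinematics=False)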
|
136388
|
from .local_diffeo_transform import LocalDiffeoTransform
from .local_diffeo_transformed_distribution import LocalDiffeoTransformedDistribution
from .lie_multipy_transform import (
LieMultiplyTransform,
SO3MultiplyTransform,
SE3MultiplyTransform,
)
from .so3_exp_transform import (
SO3ExpTransform,
SO3ExpCompactTransform,
SO3ExpBijectiveTransform,
)
from .so3_prior import SO3Prior
|
136391
|
from __future__ import absolute_import, division, unicode_literals
import json
import os
from six import iteritems
from six.moves import zip
from tqdm import tqdm
import zipfile
# these may be used by pick_group_fn/postprocess_fn
from six.moves import range # NOQA
from ..utils import download_utils, file_formats, fs_utils, job_utils
def run_select_step(options, exp_options):
# if files are to be copied, copy them
selection_copy_from = exp_options.get('selection_copy_from')
if selection_copy_from:
def symlink_file(filename):
fs_utils.symlink(
os.path.join(options['output_dir'], '..', selection_copy_from,
filename),
os.path.join(options['output_dir'], filename)
)
with job_utils.log_step("copying files from experiment '{}'"
.format(selection_copy_from)):
symlink_file('fasta')
symlink_file('metadata.json')
return
groups = exp_options['groups'].copy()
# run pick_group
with job_utils.log_step('running pick_group'):
pick_group_metadata = []
all_metadata_cache = {}
for group_name, group_options in iteritems(groups):
group_options = group_options.copy()
groups[group_name] = group_options
group_options['name'] = group_name
if 'dataset' not in group_options:
group_options['dataset'] = exp_options['dataset']
# fetch metadata
all_metadata_filename = os.path.join(
options['metadata_dir'],
group_options['dataset']['metadata'] + '.json'
)
if all_metadata_filename in all_metadata_cache:
all_metadata = all_metadata_cache[all_metadata_filename]
else:
if not os.path.exists(all_metadata_filename):
download_utils.download_file(
download_utils.url_for_file(
all_metadata_filename, options['urls_file'],
'metadata'
),
all_metadata_filename
)
with open(all_metadata_filename, 'r') as infile:
all_metadata = json.load(infile)
all_metadata_cache[all_metadata_filename] = all_metadata
# perform selection
group_metadata = job_utils.parse_multiline_lambda_str(
options['pick_group']
)(all_metadata, group_options, exp_options)
for metadata_entry in group_metadata:
metadata_entry['group'] = group_name
pick_group_metadata.append(metadata_entry)
fs_utils.mkdir_p(options['fasta_output_dir'])
# read, process, and write sequences
with job_utils.log_step('processing metadata entries'):
final_metadata = []
archive_cache = {}
file_counter = 1
for metadata_entry in tqdm(pick_group_metadata, mininterval=1,
file=job_utils.LoggerAsFile('kameris')):
group_options = groups[metadata_entry['group']]
# open archive file
archive_filename = os.path.join(
options['archives_dir'],
group_options['dataset']['archive'] + '.zip'
)
if archive_filename in archive_cache:
archive = archive_cache[archive_filename]
else:
if not os.path.exists(archive_filename):
download_utils.download_file(
download_utils.url_for_file(
archive_filename, options['urls_file'], 'archives'
),
archive_filename
)
archive = zipfile.ZipFile(archive_filename)
archive_cache[archive_filename] = archive
# fetch sequences
if 'filenames' in metadata_entry and 'postprocess' in options:
file_sequences = [
file_formats.read_fasta(archive.open(filename))
for filename in metadata_entry['filenames']
]
else:
# find path for file in archive
if 'filename' in metadata_entry:
filename = metadata_entry['filename']
else:
filename = metadata_entry['id'] + '.fasta'
if 'archive_folder' in group_options['dataset']:
filename = '{}/{}'.format(
group_options['dataset']['archive_folder'],
filename
)
# read file
file_sequences = file_formats.read_fasta(
archive.open(filename)
)
# run postprocess if required
if 'postprocess' in options:
new_metadata, sequences_list = zip(
*job_utils.parse_multiline_lambda_str(
options['postprocess']
)(metadata_entry, file_sequences, exp_options)
)
else:
new_metadata = [metadata_entry]
sequences_list = [file_sequences]
final_metadata.extend(new_metadata)
# write fasta files
for sequences, final_metadata_entry in zip(sequences_list,
new_metadata):
filename = str(file_counter).zfill(10) + '.fasta'
file_path = os.path.join(options['fasta_output_dir'], filename)
final_metadata_entry['filename'] = filename
file_formats.export_fasta(file_path, sequences)
file_counter += 1
# write metadata
with job_utils.log_step('writing metadata file'):
with open(options['metadata_output_file'], 'w') as outfile:
json.dump(final_metadata, outfile)
|
136423
|
from WebUtils.Funcs import htmlForDict
from .AdminSecurity import AdminSecurity
class Config(AdminSecurity):
def title(self):
return 'Config'
def writeContent(self):
self.writeln(htmlForDict(
self.application().config(), topHeading='Application'))
|
136429
|
from rdkit.Chem.rdMolDescriptors import CalcPBF
from ._base import Descriptor
__all__ = ("PBF",)
class PBF(Descriptor):
r"""PBF descriptor."""
__slots__ = ()
since = "1.1.2"
require_3D = True
@classmethod
def preset(cls, version):
yield cls()
def description(self):
return self.__class__.__name__
def __str__(self):
return self.__class__.__name__
def parameters(self):
return ()
def calculate(self):
return CalcPBF(self.get_3D_mol())
rtype = float
|
136436
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
import numpy as np
from dataset.data_loader_kitti_reimpl import KITTIReader_traj
from models.vgg_warper_weak_shortcut_nobn import VGG_Warper
from utils.visual import colorcode, VisdomShow, pbar
from ops.flow_warper_pad_2x import FlowWarp
from ops.hardshinkloss import HardshinkLoss
from ops.laplace2d import Laplace2D
from skimage.measure import compare_ssim as ssim
from skimage.measure import compare_psnr as psnr
from skimage.measure import compare_mse as mse
args = {}
args['gpus'] = [0]
args['seed'] = 12345
torch.backends.cudnn.benchmark = True
# Initialize Pytorch Dataloader
datareader = KITTIReader_traj(is_test=True, max_interval=10, min_ntraj=10, max_ntraj=10, is_eval=True)
train_loader = torch.utils.data.DataLoader(
datareader, batch_size=4, shuffle=False, collate_fn=datareader.collate_fn, worker_init_fn=datareader.worker_init_fn, num_workers=4, pin_memory=True, drop_last = True)
class MModel(nn.Module):
def __init__(self):
super(MModel, self).__init__()
self.warp_cnn = VGG_Warper(9)
self.flow_warper = FlowWarp()
self.mseloss = nn.MSELoss(size_average=True, reduce=True)
self.hardshrinkloss = HardshinkLoss(0., 1.)
def forward(self, img_input, warp_input, img_gt):
warp_flow, masks, comp_imgs = self.warp_cnn(warp_input) # W*H*2
warp_imgs = self.flow_warper(img_input, warp_flow, padl=83)
comp_imgs = F.hardtanh(comp_imgs,0.,1.)
masks = F.sigmoid(masks)
recon_img = torch.mul(warp_imgs, masks)+torch.mul(comp_imgs,1-masks)
return recon_img, warp_flow, comp_imgs, masks
mmodel = MModel()
mmodel.cuda()
mmodel = nn.DataParallel(mmodel, device_ids=[0])
visual = VisdomShow('kitti_eval_10')
def test():
print('\n\n=========================== Testing ============================')
mmodel.eval()
mse_stor = []
ssim_stor = []
for batch_idx, (img_input, warp_input, img_gt, vid_mask, img_input_2x) in enumerate(train_loader):
img_input = Variable(img_input, volatile=True).cuda(args['gpus'][0])
img_input_2x = Variable(img_input_2x).cuda(args['gpus'][0])
warp_input = Variable(warp_input, volatile=True).cuda(args['gpus'][0])
img_gt = Variable(img_gt, volatile=True).cuda(args['gpus'][0])
vid_mask = Variable(vid_mask, volatile=True).cuda(args['gpus'][0])
# warp_input : [interval-1, 9, H, W]
# print(warp_input.shape) # ([1, 9, 9, 192, 256])
recon_img, warp_flow, comp_imgs, masks = mmodel(img_input_2x, warp_input, img_gt)
recon_img *= vid_mask
img_gt *= vid_mask
gen_seq = recon_img.data.cpu().numpy()
gt_seq = img_gt.data.cpu().numpy()
mses = np.zeros(gen_seq.shape[0])
ssims = np.zeros(gen_seq.shape[0])
for i in range(gen_seq.shape[0]):
gen = np.transpose(gen_seq[i,:,:,:], [1,2,0])
gt = np.transpose(gt_seq[i,:,:,:], [1,2,0])
mses[i] = mse(gen,gt)
ssims[i] = ssim(gt, gen, data_range=1., multichannel=True)
mse_stor.append(mses.reshape([-1,9]))
ssim_stor.append(ssims.reshape([-1,9]))
if batch_idx%1 == 0:
pbar(batch_idx, len(train_loader), 0)
if batch_idx%10 == 0:
mse_a = np.concatenate(mse_stor, axis=0)
ssim_a = np.concatenate(ssim_stor, axis=0)
psnr_all = -10*np.log(np.mean(mse_a, axis=0))/np.log(10)
ssim_all = np.mean(ssim_a, axis=0)
print('PSNR')
print(psnr_all)
print('SSIM')
print(ssim_all)
if batch_idx%10 == 0:
out_seq = torch.cat((img_input[(0,),:,:,:],recon_img), dim=0).data.cpu().numpy()
for i in range(out_seq.shape[0]):
out_seq[i,:,:,:] = visual.add_text(out_seq[i,:,:,:], str(i), (0,1,1))
out_gt = torch.cat((img_input[(0,),:,:,:],img_gt), dim=0).data.cpu().numpy()
for i in range(out_gt.shape[0]):
out_gt[i,:,:,:] = visual.add_text(out_gt[i,:,:,:], 'GT', (0,1,0))
out_seq = np.concatenate((out_seq,out_gt), axis=3)
visual.show_vid(out_seq)
mse_a = np.concatenate(mse_stor, axis=0)
ssim_a = np.concatenate(ssim_stor, axis=0)
psnr_all = -10*np.log(np.mean(mse_a, axis=0))/np.log(10)
ssim_all = np.mean(ssim_a, axis=0)
print('\nPSNR SSIM')
for i in range(psnr_all.size):
print('{} {}'.format(psnr_all[i], ssim_all[i]))
def restore(ckpt_file):
ckpt = torch.load(ckpt_file)
mmodel.module.load_state_dict(ckpt['mmodel_state_dict'])
#optimizer.load_state_dict(ckpt['optimizer'])
#hist = ckpt['hist']
print('Restored from {}'.format(ckpt_file))
restore('./snapshots/kitti/ckpt_e0_b0_rev2.pth')
test()
|
136455
|
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_guardduty
@mock_guardduty
def test_create_detector():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.create_detector(
Enable=True,
ClientToken="745645734574758463758",
FindingPublishingFrequency="ONE_HOUR",
DataSources={"S3Logs": {"Enable": True}},
Tags={},
)
response.should.have.key("DetectorId")
response["DetectorId"].shouldnt.equal(None)
@mock_guardduty
def test_create_detector_with_minimal_params():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.create_detector(Enable=True)
response.should.have.key("DetectorId")
response["DetectorId"].shouldnt.equal(None)
@mock_guardduty
def test_list_detectors_initial():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.list_detectors()
response.should.have.key("DetectorIds").equals([])
@mock_guardduty
def test_list_detectors():
client = boto3.client("guardduty", region_name="us-east-1")
d1 = client.create_detector(
Enable=True,
ClientToken="745645734574758463758",
FindingPublishingFrequency="ONE_HOUR",
DataSources={"S3Logs": {"Enable": True}},
Tags={},
)["DetectorId"]
d2 = client.create_detector(Enable=False,)["DetectorId"]
response = client.list_detectors()
response.should.have.key("DetectorIds")
set(response["DetectorIds"]).should.equal({d1, d2})
|
136459
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from common.backbone.resnet.resnet import *
from common.backbone.resnet.resnet import Bottleneck, BasicBlock
from common.backbone.resnet.resnet import model_urls
from common.lib.roi_pooling.roi_pool import ROIPool
from common.lib.roi_pooling.roi_align import ROIAlign
from common.utils.flatten import Flattener
from common.utils.pad_sequence import pad_sequence
from common.utils.bbox import coordinate_embeddings
class FastRCNN(nn.Module):
def __init__(self, config, average_pool=True, final_dim=768, enable_cnn_reg_loss=False):
"""
:param config:
:param average_pool: whether or not to average pool the representations
:param final_dim:
        :param enable_cnn_reg_loss: whether to add an auxiliary object-classification regularization loss on the RoI features
"""
super(FastRCNN, self).__init__()
self.average_pool = average_pool
self.enable_cnn_reg_loss = enable_cnn_reg_loss
self.final_dim = final_dim
self.image_feat_precomputed = config.NETWORK.IMAGE_FEAT_PRECOMPUTED
if self.image_feat_precomputed:
if config.NETWORK.IMAGE_SEMANTIC:
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
else:
self.object_embed = None
else:
self.stride_in_1x1 = config.NETWORK.IMAGE_STRIDE_IN_1x1
self.c5_dilated = config.NETWORK.IMAGE_C5_DILATED
self.num_layers = config.NETWORK.IMAGE_NUM_LAYERS
self.pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.IMAGE_PRETRAINED,
config.NETWORK.IMAGE_PRETRAINED_EPOCH) if config.NETWORK.IMAGE_PRETRAINED != '' else None
self.output_conv5 = config.NETWORK.OUTPUT_CONV5
if self.num_layers == 18:
self.backbone = resnet18(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4])
block = BasicBlock
elif self.num_layers == 34:
self.backbone = resnet34(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4])
block = BasicBlock
elif self.num_layers == 50:
self.backbone = resnet50(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
elif self.num_layers == 101:
self.backbone = resnet101(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
elif self.num_layers == 152:
self.backbone = resnet152(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
else:
                raise NotImplementedError
output_size = (14, 14)
self.roi_align = ROIAlign(output_size=output_size, spatial_scale=1.0 / 16)
if config.NETWORK.IMAGE_SEMANTIC:
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
else:
self.object_embed = None
self.mask_upsample = None
self.roi_head_feature_extractor = self.backbone._make_layer(block=block, planes=512, blocks=3,
stride=2 if not self.c5_dilated else 1,
dilation=1 if not self.c5_dilated else 2,
stride_in_1x1=self.stride_in_1x1)
if average_pool:
self.head = torch.nn.Sequential(
self.roi_head_feature_extractor,
nn.AvgPool2d(7 if not self.c5_dilated else 14, stride=1),
Flattener()
)
else:
self.head = self.roi_head_feature_extractor
if config.NETWORK.IMAGE_FROZEN_BN:
for module in self.roi_head_feature_extractor.modules():
if isinstance(module, nn.BatchNorm2d):
for param in module.parameters():
param.requires_grad = False
frozen_stages = config.NETWORK.IMAGE_FROZEN_BACKBONE_STAGES
if 5 in frozen_stages:
for p in self.roi_head_feature_extractor.parameters():
p.requires_grad = False
frozen_stages = [stage for stage in frozen_stages if stage != 5]
self.backbone.frozen_parameters(frozen_stages=frozen_stages,
frozen_bn=config.NETWORK.IMAGE_FROZEN_BN)
if self.enable_cnn_reg_loss:
self.regularizing_predictor = torch.nn.Linear(2048, 81)
self.obj_downsample = torch.nn.Sequential(
torch.nn.Dropout(p=0.1),
torch.nn.Linear(2 * 2048 + (128 if config.NETWORK.IMAGE_SEMANTIC else 0), final_dim),
torch.nn.ReLU(inplace=True),
)
def init_weight(self):
if not self.image_feat_precomputed:
if self.pretrained_model_path is None:
pretrained_model = model_zoo.load_url(model_urls['resnet{}'.format(self.num_layers)])
else:
pretrained_model = torch.load(self.pretrained_model_path, map_location=lambda storage, loc: storage)
roi_head_feat_dict = {k[len('layer4.'):]: v for k, v in pretrained_model.items() if k.startswith('layer4.')}
self.roi_head_feature_extractor.load_state_dict(roi_head_feat_dict)
if self.output_conv5:
self.conv5.load_state_dict(roi_head_feat_dict)
def bn_eval(self):
if not self.image_feat_precomputed:
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.eval()
def forward(self, images, boxes, box_mask, im_info, classes=None, segms=None, mvrc_ops=None, mask_visual_embed=None):
"""
:param images: [batch_size, 3, im_height, im_width]
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:return: object reps [batch_size, max_num_objects, dim]
"""
box_inds = box_mask.nonzero()
obj_labels = classes[box_inds[:, 0], box_inds[:, 1]].type(torch.long) if classes is not None else None
assert box_inds.shape[0] > 0
if self.image_feat_precomputed:
post_roialign = boxes[box_inds[:, 0], box_inds[:, 1]][:, 4:]
boxes = boxes[:, :, :4]
else:
img_feats = self.backbone(images)
rois = torch.cat([
box_inds[:, 0, None].type(boxes.dtype),
boxes[box_inds[:, 0], box_inds[:, 1]],
], 1)
roi_align_res = self.roi_align(img_feats['body4'], rois).type(images.dtype)
if segms is not None:
pool_layers = self.head[1:]
post_roialign = self.roi_head_feature_extractor(roi_align_res)
post_roialign = post_roialign * segms[box_inds[:, 0], None, box_inds[:, 1]].to(dtype=post_roialign.dtype)
for _layer in pool_layers:
post_roialign = _layer(post_roialign)
else:
post_roialign = self.head(roi_align_res)
# Add some regularization, encouraging the model to keep giving decent enough predictions
if self.enable_cnn_reg_loss:
obj_logits = self.regularizing_predictor(post_roialign)
cnn_regularization = F.cross_entropy(obj_logits, obj_labels)[None]
# import pdb; pdb.set_trace()
feats_to_downsample = post_roialign if (self.object_embed is None or obj_labels is None) else \
torch.cat((post_roialign, self.object_embed(obj_labels)), -1)
if mvrc_ops is not None and mask_visual_embed is not None:
_to_masked = (mvrc_ops == 1)[box_inds[:, 0], box_inds[:, 1]]
            # import pdb; pdb.set_trace()
feats_to_downsample[_to_masked] = mask_visual_embed
coord_embed = coordinate_embeddings(
torch.cat((boxes[box_inds[:, 0], box_inds[:, 1]], im_info[box_inds[:, 0], :2]), 1),
256
)
feats_to_downsample = torch.cat((coord_embed.view((coord_embed.shape[0], -1)), feats_to_downsample), -1)
final_feats = self.obj_downsample(feats_to_downsample)
# Reshape into a padded sequence - this is expensive and annoying but easier to implement and debug...
obj_reps = pad_sequence(final_feats, box_mask.sum(1).tolist())
post_roialign = pad_sequence(post_roialign, box_mask.sum(1).tolist())
# DataParallel compatibility
obj_reps_padded = obj_reps.new_zeros((obj_reps.shape[0], boxes.shape[1], obj_reps.shape[2]))
obj_reps_padded[:, :obj_reps.shape[1]] = obj_reps
obj_reps = obj_reps_padded
post_roialign_padded = post_roialign.new_zeros((post_roialign.shape[0], boxes.shape[1], post_roialign.shape[2]))
post_roialign_padded[:, :post_roialign.shape[1]] = post_roialign
post_roialign = post_roialign_padded
# Output
output_dict = {
'obj_reps_raw': post_roialign,
'obj_reps': obj_reps,
}
if (not self.image_feat_precomputed) and self.enable_cnn_reg_loss:
output_dict.update({'obj_logits': obj_logits,
'obj_labels': obj_labels,
'cnn_regularization_loss': cnn_regularization})
if (not self.image_feat_precomputed) and self.output_conv5:
image_feature = self.img_head(img_feats['body4'])
output_dict['image_feature'] = image_feature
return output_dict
|
136479
|
import re
from bs4 import Tag
from ._abstract import AbstractScraper
from ._utils import normalize_string
"""
NOTE: This website has at least 2 prominent layout styles, so there are two logic blocks and 2 test cases
to cover them in the ingredients and instructions processing sections.
"""
class FarmhouseDelivery(AbstractScraper):
@classmethod
def host(self, domain="com"):
return f"recipes.farmhousedelivery.{domain}"
def title(self):
return self.soup.find("h1", {"class": "entry-title"}).get_text(strip=True)
def ingredients(self):
# Style 1
ingredients_marker = self.soup.find("p", text=re.compile(r"Ingredients:"))
if ingredients_marker is not None:
ingredients_marker_siblings = ingredients_marker.next_siblings
for ingredients_marker_sibling in ingredients_marker_siblings:
if (
isinstance(ingredients_marker_sibling, Tag)
and ingredients_marker_sibling.name == "ul"
):
ingredients = ingredients_marker_sibling.findAll("li")
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
]
# Style 2
ingredients_marker = self.soup.find("p", text=re.compile(r"Ingredients"))
if ingredients_marker is not None:
ingredients = []
ingredients_marker_siblings = ingredients_marker.next_siblings
for ingredients_marker_sibling in ingredients_marker_siblings:
if (
isinstance(ingredients_marker_sibling, Tag)
and ingredients_marker_sibling.name == "p"
):
if ingredients_marker_sibling.get_text() == "Instructions":
break
else:
ingredients.append(
normalize_string(ingredients_marker_sibling.get_text())
)
return ingredients
return None
def _instructions_list(self):
# Style 1
instructions_marker = self.soup.find("p", text=re.compile(r"Instructions:"))
if instructions_marker is not None:
instructions_marker_siblings = instructions_marker.next_siblings
for instructions_marker_sibling in instructions_marker_siblings:
if (
isinstance(instructions_marker_sibling, Tag)
and instructions_marker_sibling.name == "p"
and instructions_marker_sibling.get_text(strip=True) != ""
):
instructions = instructions_marker_sibling.findAll("span")
return [
normalize_string(instruction.get_text())
for instruction in instructions
]
# Style 2
instructions_marker = self.soup.find("p", text=re.compile(r"Instructions"))
if instructions_marker is not None:
instructions = []
instructions_marker_siblings = instructions_marker.next_siblings
for instructions_marker_sibling in instructions_marker_siblings:
if (
isinstance(instructions_marker_sibling, Tag)
and instructions_marker_sibling.name == "p"
and instructions_marker_sibling.get_text(strip=True) != ""
):
instructions.append(
normalize_string(instructions_marker_sibling.get_text())
)
return instructions
return None
def instructions(self):
data = self._instructions_list()
return "\n".join(data) if data else None
def image(self):
container = self.soup.find("div", {"class": "entry-content"})
if not container:
return None
image = container.find("img", {"src": True})
return image["src"] if image else None
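

# Illustrative sketch (not part of the scraper class): the two layouts described in the
# module NOTE can be reproduced on a tiny inline document. Style 1 keeps the ingredients
# in a <ul> that follows the "Ingredients:" paragraph, which is what ingredients() looks
# for first. The HTML below is made up for demonstration, not taken from the real site.
if __name__ == "__main__":
    from bs4 import BeautifulSoup

    style_1_html = "<p>Ingredients:</p><ul><li>1 cup flour</li><li>2 eggs</li></ul>"
    soup = BeautifulSoup(style_1_html, "html.parser")
    marker = soup.find("p", text=re.compile(r"Ingredients:"))
    for sibling in marker.next_siblings:
        if isinstance(sibling, Tag) and sibling.name == "ul":
            print([normalize_string(li.get_text()) for li in sibling.findAll("li")])
            # ['1 cup flour', '2 eggs']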
|
136484
|
import pytest
import random
import collections
import pickle
from uuid import uuid4
import os
import unicodedata
import tempfile
from pkg_resources import parse_version
import gluonnlp
from gluonnlp.data.tokenizers import WhitespaceTokenizer, MosesTokenizer, JiebaTokenizer,\
SpacyTokenizer, SubwordNMTTokenizer, YTTMTokenizer, SentencepieceTokenizer, \
HuggingFaceBPETokenizer, HuggingFaceByteBPETokenizer, HuggingFaceWordPieceTokenizer, \
HuggingFaceTokenizer
from gluonnlp.base import get_repo_url
from gluonnlp.data import Vocab, load_vocab
from gluonnlp.utils.misc import download
from gluonnlp.models.t5 import T5Tokenizer
EN_SAMPLES = ['Four score and seven years ago our fathers brought forth on this continent, '
'a new nation, conceived in Liberty, and dedicated to the proposition '
'that all men are created equal.',
'In spite of the debate going on for months about the photos of Özil with the '
'Turkish President Recep <NAME>, he regrets the return of '
'the 92-match national player Özil.']
DE_SAMPLES = ['Goethe stammte aus einer angesehenen bürgerlichen Familie; sein Großvater'
' mütterlicherseits war als Stadtschultheiß höchster Justizbeamter der'
' Stadt Frankfurt, sein Vater Doktor der Rechte und kaiserlicher Rat.',
'"Das ist eine Frage, die natürlich davon abhängt, dass man einmal ins '
'Gespräch kommt, dass man mit ihm auch darüber spricht, warum er das eine '
'oder andere offenbar so empfunden hat, wie das in seinem Statement niedergelegt'
' ist", sagte Grindel im Fußball-Podcast "Phrasenmäher" der "Bild-Zeitung.']
ZH_SAMPLES = ['苟活者在淡红的血色中,会依稀看见微茫的希望;真的猛士,将更奋然而前行。',
'参加工作,哈尔滨工业大学无线电工程系电子仪器及测量技术专业毕业。']
SUBWORD_TEST_SAMPLES = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
def random_inject_space(sentence):
words = sentence.split()
ret = ''
for i, word in enumerate(words):
ret += word
if i < len(words) - 1:
n_space_tokens = random.randint(1, 10)
for j in range(n_space_tokens):
ret += random.choice([' ', '\t', '\r', '\n'])
return ret
def verify_encode_token_with_offsets(tokenizer, all_sentences, gt_offsets=None):
if gt_offsets is None:
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence in\
zip(tokens, enc_tokens, offsets, sentences):
for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
assert ele_sentence[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
assert sentences[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for sentences, ele_gt_offsets in [(all_sentences[0], gt_offsets[0]),
(all_sentences, gt_offsets)]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
assert ele_gt_offsets == offsets
assert enc_tokens == tokens
def verify_sentencepiece_tokenizer_with_offsets(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence\
in zip(tokens, enc_tokens, offsets, sentences):
for i, (tok, offset, enc_tok) in enumerate(zip(ele_tokens, ele_offsets,
ele_enc_tokens)):
assert tok == enc_tok
ele_sel_tok = unicodedata.normalize('NFKC',
ele_sentence[offset[0]:offset[1]]).strip()
if tokenizer.is_first_subword(tok):
real_tok = tok[1:]
else:
real_tok = tok
assert ele_sel_tok == real_tok,\
'ele_sel_tok={}, real_tok={}'.format(ele_sel_tok, real_tok)
def verify_encode_with_offsets_consistency(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, int)
tokens, offsets = tokenizer.encode_with_offsets(sentences, int)
str_tokens, str_offsets = tokenizer.encode_with_offsets(sentences, str)
assert offsets == str_offsets
assert tokens == enc_tokens
def verify_encode_token(tokenizer, all_sentences, all_gt_tokens):
for sentences, gt_tokens in [(all_sentences[0], all_gt_tokens[0]),
(all_sentences, all_gt_tokens)]:
tokenizer_encode_ret = tokenizer.encode(sentences)
assert tokenizer_encode_ret == gt_tokens,\
'Whole Encoded: {}, \nWhole GT: {}'.format(tokenizer_encode_ret, gt_tokens)
def verify_decode(tokenizer, all_sentences, out_type=str):
for sentences in [all_sentences[0], all_sentences]:
assert tokenizer.decode(tokenizer.encode(sentences, out_type)) == sentences
def verify_decode_spm(tokenizer, all_sentences, gt_int_decode_sentences):
for sentences, case_gt_int_decode in [(all_sentences[0], gt_int_decode_sentences[0]),
(all_sentences, gt_int_decode_sentences)]:
if isinstance(sentences, str):
gt_str_decode_sentences = sentences
if tokenizer.lowercase:
gt_str_decode_sentences = gt_str_decode_sentences.lower()
gt_str_decode_sentences = unicodedata.normalize('NFKC', gt_str_decode_sentences)
elif isinstance(sentences, list):
gt_str_decode_sentences = []
for ele in sentences:
ele_gt_decode = ele
if tokenizer.lowercase:
ele_gt_decode = ele_gt_decode.lower()
ele_gt_decode = unicodedata.normalize('NFKC', ele_gt_decode)
gt_str_decode_sentences.append(ele_gt_decode)
else:
raise NotImplementedError
assert tokenizer.decode(tokenizer.encode(sentences, str)) == gt_str_decode_sentences
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_subword_nmt(tokenizer, all_sentences, gt_int_decode, gt_str_decode):
for sentences, case_gt_int_decode, case_gt_str_decode in [(all_sentences[0], gt_int_decode[0], gt_str_decode[0]),
(all_sentences, gt_int_decode, gt_str_decode)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_str_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_hf(tokenizer, all_sentences, gt_decode_sentences):
for sentences, case_gt_decode in [(all_sentences[0], gt_decode_sentences[0]),
(all_sentences, gt_decode_sentences)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_decode
if isinstance(sentences, list):
for sentence in sentences:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentence, int))\
== tokenizer.encode(sentence, str)
assert tokenizer.vocab[tokenizer.encode(sentence, str)]\
== tokenizer.encode(sentence, int)
else:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentences, int)) \
== tokenizer.encode(sentences, str)
assert tokenizer.vocab[tokenizer.encode(sentences, str)] \
== tokenizer.encode(sentences, int)
def verify_decode_no_vocab_raise(tokenizer):
# When the vocab is not attached, should raise ValueError
for sentences in [EN_SAMPLES[0], EN_SAMPLES]:
with pytest.raises(ValueError):
tokenizer.encode(sentences, int)
with pytest.raises(ValueError):
tokenizer.decode([0])
with pytest.raises(ValueError):
tokenizer.decode([[0], [1]])
def verify_pickleble(tokenizer, cls):
print(tokenizer)
# Verify if the tokenizer is pickleable and has the same behavior after dumping/loading
tokenizer_p = pickle.loads(pickle.dumps(tokenizer))
assert isinstance(tokenizer_p, cls)
assert tokenizer.encode(SUBWORD_TEST_SAMPLES, str) == tokenizer_p.encode(SUBWORD_TEST_SAMPLES, str)
def test_whitespace_tokenizer():
tokenizer = WhitespaceTokenizer()
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers', 'brought',
'forth', 'on', 'this', 'continent,', 'a', 'new', 'nation,', 'conceived',
'in', 'Liberty,', 'and', 'dedicated', 'to', 'the', 'proposition', 'that',
'all', 'men', 'are', 'created', 'equal.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan,', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie;', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt,', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat.'],
['"Das', 'ist', 'eine', 'Frage,', 'die', 'natürlich', 'davon', 'abhängt,',
'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt,', 'dass', 'man', 'mit',
'ihm', 'auch', 'darüber', 'spricht,', 'warum', 'er', 'das', 'eine', 'oder',
'andere', 'offenbar', 'so', 'empfunden', 'hat,', 'wie', 'das', 'in',
'seinem', 'Statement', 'niedergelegt', 'ist",', 'sagte', 'Grindel', 'im',
'Fußball-Podcast', '"Phrasenmäher"', 'der', '"Bild-Zeitung.']]
for _ in range(2):
# Inject noise and test for encode
noisy_en_samples = [random_inject_space(ele) for ele in EN_SAMPLES]
noisy_de_samples = [random_inject_space(ele) for ele in DE_SAMPLES]
verify_encode_token(tokenizer, noisy_en_samples + noisy_de_samples,
gt_en_tokenized + gt_de_tokenized)
# Test for decode
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, str)
# Test for encode_with_offsets
verify_encode_token_with_offsets(tokenizer, noisy_en_samples + noisy_de_samples)
verify_decode_no_vocab_raise(tokenizer)
# Test for output_type = int
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized,
[])))
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, int)
verify_pickleble(tokenizer, WhitespaceTokenizer)
verify_encode_token_with_offsets(tokenizer, EN_SAMPLES + DE_SAMPLES)
def test_moses_tokenizer():
en_tokenizer = MosesTokenizer('en')
de_tokenizer = MosesTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt', ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden',
'hat', ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt',
'ist', '"', ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast',
'"', 'Phrasenmäher', '"', 'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
verify_decode(en_tokenizer, EN_SAMPLES, str)
verify_decode(de_tokenizer, DE_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
verify_decode_no_vocab_raise(en_tokenizer)
verify_decode_no_vocab_raise(de_tokenizer)
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_decode(en_tokenizer, EN_SAMPLES, int)
verify_decode(de_tokenizer, DE_SAMPLES, int)
verify_pickleble(en_tokenizer, MosesTokenizer)
verify_pickleble(de_tokenizer, MosesTokenizer)
def test_jieba_tokenizer():
tokenizer = JiebaTokenizer()
gt_zh_tokenized = [['苟活', '者', '在', '淡红', '的', '血色', '中', ',',
'会', '依稀', '看见', '微茫', '的', '希望', ';', '真的',
'猛士', ',', '将', '更奋', '然而', '前行', '。'],
['参加', '工作', ',', '哈尔滨工业大学', '无线电', '工程系', '电子仪器',
'及', '测量', '技术', '专业', '毕业', '。']]
verify_encode_token(tokenizer, ZH_SAMPLES, gt_zh_tokenized)
verify_decode(tokenizer, ZH_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_zh_tokenized, [])))
verify_decode_no_vocab_raise(tokenizer)
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, ZH_SAMPLES, int)
verify_pickleble(tokenizer, JiebaTokenizer)
def test_spacy_tokenizer():
en_tokenizer = SpacyTokenizer('en')
de_tokenizer = SpacyTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt', 'Frankfurt',
',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und', 'kaiserlicher',
'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden', 'hat',
',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt', 'ist', '"',
',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_pickleble(en_tokenizer, SpacyTokenizer)
verify_pickleble(de_tokenizer, SpacyTokenizer)
verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
    # Test loading the spacy tokenizer by specifying the "model" flag
en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
out = en_tokenizer.encode(EN_SAMPLES)
def test_yttm_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'yttm.model')
download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
path=model_path)
tokenizer = YTTMTokenizer(model_path=model_path)
gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
(13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
(27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
(54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
(62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, YTTMTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
# Begin to verify decode
for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
(SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
assert int_decode == ele_gt_int_decode
assert str_decode == ele_gt_str_decode
os.remove(model_path)
assert tokenizer.decode([]) == ''
assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'spm.model')
download(url=get_repo_url()
+ 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
path=model_path)
# Case1
tokenizer = SentencepieceTokenizer(model_path)
gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
'▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
'#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
(12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
(24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
(37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
(51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SentencepieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
# Case2, lower_case
gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
'gluonnlp is great!!!!!!',
'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
        # Case3, use the sentencepiece subword-regularization sampling options (nbest, alpha) and test whether we can obtain different encoding results
tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
has_different_encode_out = False
encode_out = None
for _ in range(10):
if encode_out is None:
encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
else:
ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
if ele_out != encode_out:
has_different_encode_out = True
break
assert has_different_encode_out
os.remove(model_path)
# Case of T5 Tokenizer
with tempfile.TemporaryDirectory() as dir_path:
vocab_path = os.path.join(dir_path, 't5_spm.model')
download(
url=get_repo_url() + 'tokenizer_test_models/sentencepiece/case_t5/test_t5spm-5f05e7.model',
path=vocab_path
)
extra_ids = 100
tokenizer = T5Tokenizer(vocab_path, extra_ids)
gt_tokenized = [
['▁Hello', ',', '▁', 'y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁VIII', '▁', '😁',
'▁', '😁', '▁', '😁', '▁', '?'],
['▁', 'Glu', 'on', 'N', 'LP', '▁is', '▁great', '!', '!!!!!'],
['▁', 'Glu', 'on', 'N', 'LP', '-', 'Am', 'a', 'zon', '-', 'H', 'a', 'i', 'bin', '-',
'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u', 'a', 'i', '-', 'X', 'ing', 'j',
'i', 'an', '.....', '/', ':', '!', '@', '#', '▁', "'", 'a', 'b', 'c', "'"]
]
gt_offsets = [
[(0, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21), (21, 25),
(25, 27), (27, 28), (28, 29), (29, 30), (30, 31), (31, 32), (32, 33), (33, 34), (34, 35)],
[(0, 0), (0, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18), (18, 23)],
[(0, 0), (0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 11), (11, 12), (12, 15), (15, 16),
(16, 17), (17, 18), (18, 19), (19, 22), (22, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44),
(44, 47), (47, 48), (48, 49), (49, 51), (51, 56), (56, 57), (57, 58), (58, 59), (59, 60),
(60, 61), (61, 62), (62, 63), (63, 64), (64, 65), (65, 66), (66, 67)]
]
gt_int_decode = [
"Hello, y'all! How are you VIII ⁇ ⁇ ⁇ ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"
]
inserted_special_tokens = list('<extra_id_{}>'.format(i) for i in range(extra_ids - 1, -1, -1))
assert list(
tokenizer.vocab.to_tokens(i) for i in range(len(tokenizer._sp_model), len(tokenizer._vocab))
) == inserted_special_tokens, 'Some <extra_id> tokens are not properly inserted.'
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SentencepieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
os.remove(vocab_path)
def test_subword_nmt_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'subword_nmt.model')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
path=vocab_path)
# Case 1
tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
"Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
"!</w>"],
["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
"on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
"an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 16),
(16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
"GluonNLP is great!!!!!!",
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# \'abc\'"]
gt_str_decode = SUBWORD_TEST_SAMPLES
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SubwordNMTTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
# Case 2, bpe_dropout
# We use str decode here because we may not perfectly recover the original sentence with int decode.
tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
os.remove(model_path)
os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'test_hf_bpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
'!</w>', '!</w>', '!</w>', '!</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
'.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
"'</w>", 'ab', 'c</w>', "'</w>"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
(26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
(19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
(17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
(48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
# gt_int_decode = gt_str_decode for hf
# hf removed the unk tokens in decode result
gt_decode = ["Hello , y ' all ! How are you ?",
'GluonNLP is great ! ! ! ! ! !',
"GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian . . . . . / : ! @ # ' abc '"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_decode = ["hello , y ' all ! how are you ?",
'gluonnlp is great ! ! ! ! ! !',
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
'ģ', 'ï¼', 'ģ', '!!!'],
['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
'-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
'#', "Ġ'", 'ab', 'c', "'"]]
        # the definition of byte-level offsets is not entirely clear
gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
(30, 31), (31, 33), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
(17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
(16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
(30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
(56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
'gluonnlp is great!!!!!!',
"gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:!@# 'abc'"]
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
download(url=get_repo_url()
+ 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
download(url=get_repo_url()
+ 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
path=hf_vocab_path)
# Case 1, lowercase=True
tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
"<unk>", "<unk>", "<unk>", "<unk>", "?"],
["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
"\uff01", "\uff01", "!", "!", "!"],
["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
"##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
"xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
"@", "#", "'", "abc", "'"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
(19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
(40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
(53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
(62, 63), (63, 66), (66, 67)]]
gt_decode = ["hello, y'all! how are you?",
"gluonnlp is great ! ! !!!!",
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian..... / :! @ #'abc '"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=False
gt_lowercase_decode = [", y'all! are you?",
"is great ! ! !!!!",
"- - - - - -..... / :! @ #'abc '"]
tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(vocab_path)
os.remove(hf_vocab_path)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
"""Test for huggingface tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
path=model_path,
sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
path=vocab_path,
sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
'[UNK]', '[UNK]', '!', '!', '!'],
['Gl', '##u', '##on', '##N', '##L', '##P', '-',
'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', '##ard',
'-', 'She', '##n', '##g', '-', 'Sh', '##ua', '##i', '-', 'X',
'##ing', '##j', '##ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '[UNK]', 'ab', '##c', '[UNK]']]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
(14, 17), (18, 21), (22, 25), (26, 27), (28, 29), (30, 31),
(32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
(17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9),
(9, 11), (11, 13), (13, 15), (15, 16), (16, 18), (18, 21),
(21, 22), (22, 23), (23, 27), (27, 30), (30, 31), (31, 34),
(34, 35), (35, 36), (36, 37), (37, 39), (39, 41), (41, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52),
(52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66),
(66, 67)]]
gt_decode = ['Hello, y all! How are you?',
'GluonNLP is great!!!',
'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian..... / '
':! @ # abc']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bpe_tokenizer_v08():
"""Test for huggingface BPE tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bpe_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.model',
path=model_path,
sha1_hash='ecda90979561ca4c5a8d769b5e3c9fa2270d5317')
vocab_path = os.path.join(dir_path, 'hf_bpe_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.vocab',
path=vocab_path,
sha1_hash='b92dde0b094f405208f3ec94b5eae88430bf4262')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['H', 'ello</w>', ',</w>', 'y</w>', 'all</w>', '!</w>',
'How</w>', 'are</w>', 'you</w>', '?</w>'],
['G', 'lu', 'on', 'N', 'L', 'P</w>', 'is</w>', 'great</w>',
'!</w>', '!</w>', '!</w>'],
['G', 'lu', 'on', 'N', 'L', 'P</w>', '-</w>', 'Amaz', 'on</w>',
'-</w>', 'Ha', 'i', 'bin</w>', '-</w>', 'Leon', 'ard</w>', '-</w>',
'Sh', 'eng</w>', '-</w>', 'S', 'hu', 'ai</w>', '-</w>', 'X', 'ing',
'j', 'ian</w>', '.</w>', '.</w>', '.</w>', '.</w>', '.</w>', '/</w>',
':</w>', '!</w>', '@</w>', '#</w>', 'ab', 'c</w>']]
gt_offsets = [[(0, 1), (1, 5), (5, 6), (7, 8), (9, 12), (12, 13), (14, 17),
(18, 21), (22, 25), (34, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
(20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 15),
(15, 16), (16, 18), (18, 19), (19, 22), (22, 23), (23, 27), (27, 30),
(30, 31), (31, 33), (33, 36), (36, 37), (37, 38), (38, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53),
(53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60),
(60, 61), (63, 65), (65, 66)]]
gt_decode = ['Hello , y all ! How are you ?',
'GluonNLP is great ! ! !',
'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian'
' . . . . . / : ! @ # abc']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bytebpe_tokenizer_v08():
"""Test for huggingface bytebpe tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.model',
path=model_path,
sha1_hash='a1c4da1f6c21df923e150f56dbb5b7a53c61808b')
vocab_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.vocab',
path=vocab_path,
sha1_hash='7831b19078a3222f450e65b2188dc0770473123b')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['He', 'llo', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ', 'ð', 'Ł', 'ĺ',
'ģ', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ?'],
['G', 'l', 'u', 'on', 'N', 'L', 'P', 'Ġis', 'Ġgreat', 'ï', '¼', 'ģ',
'ï', '¼', 'ģ', 'ï', '¼', 'ģ', '!', '!', '!'],
['G', 'l', 'u', 'on', 'N', 'L', 'P', '-', 'Am', 'az', 'on', '-',
'Ha', 'ib', 'in', '-', 'Le', 'on', 'ard', '-', 'S', 'hen', 'g', '-',
'Sh', 'u', 'ai', '-', 'X', 'ing', 'j', 'ian',
'..', '...', '/', ':', '!', '@', '#', 'Ġ', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 27), (26, 27), (26, 27), (27, 28), (28, 29),
(28, 29), (28, 29), (28, 29), (29, 30), (30, 31), (30, 31), (30, 31),
(30, 31), (31, 32), (32, 33), (32, 33), (32, 33), (32, 33), (33, 35)],
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17),
(17, 18), (17, 18), (17, 18), (18, 19), (18, 19), (18, 19), (19, 20),
(19, 20), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 11),
(11, 13), (13, 15), (15, 16), (16, 18), (18, 20), (20, 22), (22, 23),
(23, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 48), (48, 51), (51, 53), (53, 56), (56, 57),
(57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
def test_tokenizers_create():
tokenizer = gluonnlp.data.tokenizers.create('moses', 'en')
tokenizer.encode('hello world!')
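

# Minimal usage sketch (not an actual test): mirrors the encode / set_vocab / decode
# pattern exercised repeatedly above, using only APIs already imported in this file.
def _whitespace_roundtrip_example():
    tokenizer = WhitespaceTokenizer()
    tokens = tokenizer.encode('hello world !')      # -> ['hello', 'world', '!']
    vocab = Vocab(collections.Counter(tokens))
    tokenizer.set_vocab(vocab)
    ids = tokenizer.encode('hello world !', int)    # ids resolved through the attached vocab
    assert tokenizer.decode(ids) == 'hello world !'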
|
136489
|
from django import test
from django_extras.forms import fields
from django_extras.core import validators
class ColorFieldTestCase(test.TestCase):
def test_check_validator_no_alpha(self):
target = fields.ColorField()
self.assertIn(validators.validate_color, target.validators)
def test_check_validator_with_alpha(self):
target = fields.ColorField(allow_alpha=True)
self.assertIn(validators.validate_alpha_color, target.validators)
|
136502
|
from sqlalchemy import create_engine
from constants import SQLALCHEMY_URL
engine = create_engine(SQLALCHEMY_URL)
|
136505
|
from dataclasses import dataclass
from enum import IntEnum, IntFlag, auto
from functools import reduce
from struct import Struct
from typing import Any, Dict, List, Optional, Type
from .datatypes import (
AerospikeDataType,
AerospikeKeyType,
AerospikeValueType,
data_to_aerospike_type,
parse_raw,
)
# You can read about these flags in as_command.h (C client)
class Info1Flags(IntFlag):
EMPTY = 0
READ = auto()
GET_ALL = auto()
UNUSED = auto()
BATCH_INDEX = auto()
XDR = auto()
DONT_GET_BIN_DATA = auto()
READ_MODE_AP_ALL = auto()
# Last bit unused
class Info2Flags(IntFlag):
EMPTY = 0
WRITE = auto()
DELETE = auto()
GENERATION = auto()
# apply write if new generation >= old, good for RESTORE
GENERATION_GT = auto()
DURABLE_DELETE = auto()
CREATE_ONLY = auto()
UNUSED = auto()
RESPOND_ALL_OPS = auto()
class Info3Flags(IntFlag):
EMPTY = 0
LAST = auto()
COMMIT_MASTER = auto()
UNUSED = auto()
UPDATE_ONLY = auto()
CREATE_OR_REPLACE = auto()
REPLACE_ONLY = auto()
SC_READ_TYPE = auto()
SC_READ_RELAX = auto()
class FieldTypes(IntEnum):
NAMESPACE = 0
SETNAME = 1
KEY = 2
DIGEST = 4
TASK_ID = 7
SCAN_OPTIONS = 8
SCAN_TIMEOUT = 9
SCAN_RPS = 10
INDEX_RANGE = 22
INDEX_FILTER = 23
INDEX_LIMIT = 24
INDEX_ORDER = 25
INDEX_TYPE = 26
UDF_PACKAGE_NAME = 30
UDF_FUNCTION = 31
UDF_ARGLIST = 32
UDF_OP = 33
QUERY_BINS = 40
BATCH_INDEX = 41
BATCH_INDEX_WITH_SET = 42
PREDEXP = 43
@dataclass
class Field:
FORMAT = Struct("!IB")
field_type: FieldTypes
data: bytes
def pack(self) -> bytes:
length = len(self.data) + 1
return self.FORMAT.pack(length, self.field_type) + self.data
@classmethod
def parse(cls: Type["Field"], data: bytes) -> "Field":
length, field_type = cls.FORMAT.unpack(data[: cls.FORMAT.size])
        data = data[cls.FORMAT.size : cls.FORMAT.size + length - 1]
return cls(field_type=field_type, data=data)
def __len__(self):
return len(self.data) + self.FORMAT.size
class OperationTypes(IntEnum):
READ = 1
WRITE = 2
CDT_READ = 3
CDT_MODIFY = 4
MAP_READ = 6
MAP_MODIFY = 7
INCR = 5
APPEND = 9
PREPEND = 10
TOUCH = 11
BIT_READ = 12
BIT_MODIFY = 13
DELETE = 14
@dataclass
class Bin:
FORMAT = Struct("BBB")
version: int
name: str
data: AerospikeDataType
def pack(self) -> bytes:
base = self.FORMAT.pack(self.data.TYPE, self.version, len(self.name))
return base + self.name.encode("utf-8") + self.data.pack()
@classmethod
def parse(cls: Type["Bin"], data: bytes) -> "Bin":
unpacked = cls.FORMAT.unpack(data[: cls.FORMAT.size])
btype, version, name_length = unpacked
name = data[cls.FORMAT.size : cls.FORMAT.size + name_length].decode(
"utf-8"
)
data = data[cls.FORMAT.size + name_length :]
bin_data = parse_raw(btype, data)
return cls(name=name, version=version, data=bin_data)
def __len__(self):
return self.FORMAT.size + len(self.name) + len(self.data)
@classmethod
def create(cls, name: str, data: Any, version=0) -> "Bin":
adata = data_to_aerospike_type(data)
return cls(name=name, version=version, data=adata)
@dataclass
class Operation:
    # Wire layout: size (uint32), op (uint8), followed by the packed Bin
    # (data type, version, name length, name, value)
    FORMAT = Struct("!IB")
operation_type: OperationTypes
data_bin: Bin
def pack(self) -> bytes:
packed_bin = self.data_bin.pack()
length = len(packed_bin) + 1
return self.FORMAT.pack(length, self.operation_type) + packed_bin
@classmethod
def parse(cls: Type["Operation"], data: bytes) -> "Operation":
unpacked = cls.FORMAT.unpack(data[: cls.FORMAT.size])
size, operation_type = unpacked
data_bin = Bin.parse(data[cls.FORMAT.size : cls.FORMAT.size + size - 1])
return cls(operation_type=operation_type, data_bin=data_bin)
def __len__(self):
return len(self.data_bin) + self.FORMAT.size
@dataclass
class Message:
FORMAT = Struct("!BBBBxBIIIHH")
info1: Info1Flags
info2: Info2Flags
info3: Info3Flags
transaction_ttl: int
fields: List[Field]
operations: List[Operation]
result_code: int = 0
generation: int = 0
record_ttl: int = 0
def pack(self) -> bytes:
base = self.FORMAT.pack(
self.FORMAT.size,
self.info1,
self.info2,
self.info3,
self.result_code,
self.generation,
self.record_ttl,
self.transaction_ttl,
len(self.fields),
len(self.operations),
)
fields = reduce(
lambda x, y: x + y, (field.pack() for field in self.fields), b""
)
operations = reduce(
lambda x, y: x + y, (op.pack() for op in self.operations), b""
)
return base + fields + operations
@classmethod
def parse(cls: Type["Message"], data: bytes) -> "Message":
parsed_tuple = cls.FORMAT.unpack(data[: cls.FORMAT.size])
(
_size,
info1,
info2,
info3,
result_code,
generation,
ttl,
transaction_ttl,
fields_count,
operations_count,
) = parsed_tuple
data_left = data[cls.FORMAT.size :]
fields = []
operations = []
for _i in range(0, fields_count):
f = Field.parse(data_left)
fields.append(f)
            data_left = data_left[len(f) :]
for _i in range(0, operations_count):
op = Operation.parse(data_left)
operations.append(op)
data_left = data_left[len(op) :]
return cls(
info1=info1,
info2=info2,
info3=info3,
result_code=result_code,
generation=generation,
record_ttl=ttl,
transaction_ttl=transaction_ttl,
fields=fields,
operations=operations,
)
def generate_namespace_set_key_fields(
namespace: str, set_name: str, key: AerospikeKeyType
) -> List[Field]:
set_encoded = set_name.encode("utf-8")
namespace_field = Field(FieldTypes.NAMESPACE, namespace.encode("utf-8"))
set_field = Field(FieldTypes.SETNAME, set_encoded)
aero_key = data_to_aerospike_type(key)
key_field = Field(FieldTypes.DIGEST, aero_key.digest(set_name))
return [namespace_field, set_field, key_field]
def put_key(
namespace: str,
set_name: str,
key: AerospikeKeyType,
bin_: Dict[str, AerospikeValueType],
ttl: int = 0,
) -> Message:
fields = generate_namespace_set_key_fields(namespace, set_name, key)
ops = []
for k, v in bin_.items():
op = Operation(OperationTypes.WRITE, Bin.create(name=k, data=v))
ops.append(op)
return Message(
info1=Info1Flags.EMPTY,
info2=Info2Flags.WRITE,
info3=Info3Flags.EMPTY,
transaction_ttl=1000,
fields=fields,
operations=ops,
record_ttl=ttl,
)
def get_key(namespace: str, set_name: str, key: AerospikeKeyType) -> Message:
fields = generate_namespace_set_key_fields(namespace, set_name, key)
return Message(
info1=Info1Flags.READ | Info1Flags.GET_ALL,
info2=Info2Flags.EMPTY,
info3=Info3Flags.EMPTY,
transaction_ttl=1000,
fields=fields,
operations=[],
)
def delete_key(namespace: str, set_name: str, key: AerospikeKeyType) -> Message:
fields = generate_namespace_set_key_fields(namespace, set_name, key)
return Message(
info1=Info1Flags.EMPTY,
info2=Info2Flags.DELETE | Info2Flags.WRITE,
info3=Info3Flags.EMPTY,
transaction_ttl=1000,
fields=fields,
operations=[],
)
def key_exists(namespace: str, set_name: str, key: AerospikeKeyType) -> Message:
fields = generate_namespace_set_key_fields(namespace, set_name, key)
return Message(
info1=Info1Flags.READ | Info1Flags.DONT_GET_BIN_DATA,
info2=Info2Flags.EMPTY,
info3=Info3Flags.EMPTY,
transaction_ttl=1000,
fields=fields,
operations=[],
)
def operate(
namespace: str,
set_name: str,
key: AerospikeKeyType,
info1: Info1Flags,
info2: Info2Flags,
info3: Info3Flags,
operations: List[Operation],
fields: Optional[List[Field]] = None,
ttl: int = 0,
generation: int = 0,
):
fields = fields or []
fields += generate_namespace_set_key_fields(namespace, set_name, key)
return Message(
info1=info1,
info2=info2,
info3=info3,
transaction_ttl=1000,
fields=fields,
operations=operations,
generation=generation,
record_ttl=ttl,
)
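

# Rough usage sketch (illustrative only): build a write and a read message with the
# helpers above and pack them to wire bytes. Sending them to a server, and the exact set
# of key/value types accepted by data_to_aerospike_type, are outside this module.
if __name__ == "__main__":
    put_msg = put_key("test", "demo", "user-1", {"greeting": "hello"}, ttl=60)
    get_msg = get_key("test", "demo", "user-1")
    print(len(put_msg.pack()), len(get_msg.pack()))  # sizes of the packed payloads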
|
136545
|
import numpy as np
import pandas as pd
l_2d = [[0, 1, 2], [3, 4, 5]]
arr_t = np.array(l_2d).T
print(arr_t)
print(type(arr_t))
# [[0 3]
# [1 4]
# [2 5]]
# <class 'numpy.ndarray'>
l_2d_t = np.array(l_2d).T.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
df_t = pd.DataFrame(l_2d).T
print(df_t)
print(type(df_t))
# 0 1
# 0 0 3
# 1 1 4
# 2 2 5
# <class 'pandas.core.frame.DataFrame'>
l_2d_t = pd.DataFrame(l_2d).T.values.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
l_2d_t_tuple = list(zip(*l_2d))
print(l_2d_t_tuple)
print(type(l_2d_t_tuple))
# [(0, 3), (1, 4), (2, 5)]
# <class 'list'>
print(l_2d_t_tuple[0])
print(type(l_2d_t_tuple[0]))
# (0, 3)
# <class 'tuple'>
l_2d_t = [list(x) for x in zip(*l_2d)]
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
print(l_2d_t[0])
print(type(l_2d_t[0]))
# [0, 3]
# <class 'list'>
print(*l_2d)
# [0, 1, 2] [3, 4, 5]
print(list(zip([0, 1, 2], [3, 4, 5])))
# [(0, 3), (1, 4), (2, 5)]
print([list(x) for x in [(0, 3), (1, 4), (2, 5)]])
# [[0, 3], [1, 4], [2, 5]]
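
# One caveat with the zip(*...) idiom: ragged input is silently truncated to the
# shortest row, so check row lengths first if that matters.
l_ragged = [[0, 1, 2], [3, 4]]
print([list(x) for x in zip(*l_ragged)])
# [[0, 3], [1, 4]]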
|
136555
|
import json
from dispatch.enums import DispatchEnum
from dispatch.incident.models import Incident
from dispatch.feedback.enums import FeedbackRating
class RatingFeedbackBlockId(DispatchEnum):
anonymous = "anonymous_field"
feedback = "feedback_field"
rating = "rating_field"
class RatingFeedbackCallbackId(DispatchEnum):
submit_form = "rating_feedback_submit_form"
def rating_feedback_view(incident: Incident, channel_id: str):
"""Builds all blocks required to rate and provide feedback about an incident."""
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Incident Feedback"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "plain_text",
"text": "Use this form to rate your experience and provide feedback about the incident.",
}
],
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Submit"},
"callback_id": RatingFeedbackCallbackId.submit_form,
"private_metadata": json.dumps({"incident_id": str(incident.id), "channel_id": channel_id}),
}
rating_picker_options = []
for rating in FeedbackRating:
rating_picker_options.append(
{"text": {"type": "plain_text", "text": rating}, "value": rating}
)
rating_picker_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.rating,
"label": {"type": "plain_text", "text": "Rate your experience"},
"element": {
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select a rating"},
"options": rating_picker_options,
},
"optional": False,
}
modal_template["blocks"].append(rating_picker_block)
feedback_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.feedback,
"label": {"type": "plain_text", "text": "Give us feedback"},
"element": {
"type": "plain_text_input",
"action_id": RatingFeedbackBlockId.feedback,
"placeholder": {
"type": "plain_text",
"text": "How would you describe your experience?",
},
"multiline": True,
},
"optional": False,
}
modal_template["blocks"].append(feedback_block)
anonymous_checkbox_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.anonymous,
"label": {
"type": "plain_text",
"text": "Check the box if you wish to provide your feedback anonymously",
},
"element": {
"type": "checkboxes",
"action_id": RatingFeedbackBlockId.anonymous,
"options": [
{
"value": "anonymous",
"text": {"type": "plain_text", "text": "Anonymize my feedback"},
},
],
},
"optional": True,
}
modal_template["blocks"].append(anonymous_checkbox_block)
return modal_template
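

# Shape-check sketch (illustrative only): rating_feedback_view only reads `incident.id`,
# so a hypothetical stand-in object is enough to inspect the generated view locally.
if __name__ == "__main__":
    class _FakeIncident:  # hypothetical stand-in, not a real dispatch Incident
        id = 1

    view = rating_feedback_view(_FakeIncident(), channel_id="C0123456789")
    assert view["callback_id"] == RatingFeedbackCallbackId.submit_form
    assert [b.get("block_id") for b in view["blocks"][1:]] == [
        RatingFeedbackBlockId.rating,
        RatingFeedbackBlockId.feedback,
        RatingFeedbackBlockId.anonymous,
    ]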
|
136583
|
from enum import Enum
class ModeIndicator(Enum):
INPUT = '+' # use existing variable
OUTPUT = '-' # create new variable (do not reuse existing)
CONSTANT = 'c' # insert constant
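

# Tiny usage sketch: members round-trip through their one-character codes.
if __name__ == "__main__":
    assert ModeIndicator('+') is ModeIndicator.INPUT
    assert ModeIndicator.OUTPUT.value == '-'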
|
136584
|
from django.db import migrations, models
import two_factor.models
class Migration(migrations.Migration):
dependencies = [
('two_factor', '0005_auto_20160224_0450'),
]
operations = [
migrations.AlterField(
model_name='phonedevice',
name='key',
field=models.CharField(default=two_factor.models.random_hex_str, help_text='Hex-encoded secret key', max_length=40, validators=[two_factor.models.key_validator]),
),
]
|
136591
|
import os
from setka.pipes.logging.progressbar.theme_parser import view_status, format_status
# from setka.pipes.logging.progressbar.theme import main_theme
try:
from IPython.display import display, update_display
except ImportError:
pass
def isnotebook():
try:
shell = get_ipython().__class__.__name__
module = get_ipython().__class__.__module__
if module == "google.colab._shell":
return True
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False
def nlines(text):
return text.count('\n') + 1
class StageProgressBar:
def __init__(self, width_function=None, display_id=0, is_ipython=None):
self.width_function = width_function
self.last_vals = None
self.finalized = False
self.started = False
self.is_ipython = isnotebook() if is_ipython is None else is_ipython
self.display_id = display_id
def __str__(self):
status = format_status(self.last_vals)
to_view = view_status(status, display_len=self.width)
return to_view
def display(self, content):
if not self.is_ipython:
print(content, end='')
print('\033[' + str(nlines(content)) + 'A')
else:
update_display({'text/plain': content}, display_id=self.display_id, raw=True)
def __del__(self):
self.finalize()
def update(self, vals):
if self.finalized:
return
self.width = self.width_function()
self.last_vals = vals
cur_info = str(self)
if not self.started:
self.started = True
if self.is_ipython:
display({'text/plain': ''}, display_id=self.display_id, raw=True)
self.display(cur_info)
    def finalize(self):
        if (not self.finalized) and (not self.is_ipython):
            print(str(self))
        self.finalized = True
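

# Sketch of the terminal trick display() relies on (independent of theme_parser's status
# format): print a line, then emit the ANSI "cursor up" escape ("\033[<n>A") so the next
# write overwrites it in place.
if __name__ == "__main__":
    import time
    block = ''
    for i in range(3):
        block = 'step {} | value {:.2f}'.format(i, 1.0 / (i + 1))
        print(block, end='')
        time.sleep(0.2)
        print('\033[' + str(nlines(block)) + 'A')
    print(block)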
|
136611
|
import csv
import os
import cv2
import random
import argparse
def main(args):
image_path = args.image_path
csv_path = args.csv_path
preprocess(image_path, csv_path)
def preprocess(image_path, csv_path):
print("start preprocess...")
    f = open(csv_path, 'w', encoding='utf-8', newline='')
csv_writer = csv.writer(f)
csv_writer.writerow(["ID", "CATE", "size"])
for image in os.listdir(image_path):
image_file_path = image_path + '/' + image
img_tmp = cv2.imread(image_file_path, 0)
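        # Count the pixels that are exactly 255 in the grayscale mask; the CATE column is
        # filled with a random 0/1 value here rather than a label derived from the image.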
size = (img_tmp == 255).sum()
p = random.randint(0, 1)
csv_writer.writerow([image, p, size])
print(f"{image} done!")
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="PREPROCESS", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--image_path', type=str, default="./train_data/label", help='image path')
parser.add_argument('--csv_path', type=str, default="./train_data/train.csv", help='csv path')
    args, unknown = parser.parse_known_args()
main(args)
|
136696
|
import spacy
import classy_classification # noqa: F401
from .data import training_data, validation_data
nlp = spacy.blank("en")
nlp.add_pipe("text_categorizer", config={"data": list(training_data.keys()), "cat_type": "zero", "include_sent": True})
print([sent._.cats for sent in nlp(validation_data[0]).sents])
print([doc._.cats for doc in nlp.pipe(validation_data)])
|
136726
|
import os
import asyncio
import chess
import discord
from discord.ext import commands, tasks
from discord.ext.commands import Context
from cogs.utils.chess_utils import ChessUtils
import berserk
"""
{'type': 'gameState',
'moves': 'g1f3',
'wtime': datetime.datetime(1970, 1, 25, 20, 31, 23, 647000, tzinfo=datetime.timezone.utc),
'btime': datetime.datetime(1970, 1, 25, 20, 31, 23, 647000, tzinfo=datetime.timezone.utc),
'winc': datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
'binc': datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
'wdraw': False,
'bdraw': False,
'status': 'started'}
"""
class Chess(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
session = berserk.TokenSession(os.getenv("ACCESS_TOKEN"))
self.client = berserk.Client(session)
self.utils = ChessUtils(self.client)
@commands.group(invoke_without_command=True, name="chess")
async def chess(self, ctx: Context):
if ctx.invoked_subcommand is None:
await ctx.send("Please enter a subcommand")
@chess.command(name="import")
async def _import(self, ctx: Context, lichess_game_id: str):
"""Show a game from Lichess.org"""
dump_channel = self.bot.get_channel(int(os.getenv("DUMP_CHANNEL", "868392499348144180")))
game = self.utils.import_game_by_id(lichess_game_id)
positions = list(game.mainline())
position_index = 0
embed, image_file = self.utils.create_position_embed(game, game.board())
dump_message = await dump_channel.send(file=image_file)
message = await ctx.send(embed=embed.set_image(url=dump_message.attachments[0].url))
await message.add_reaction("◀")
await message.add_reaction("▶")
def check(reaction, user):
return user.id == ctx.author.id and reaction.message.id == message.id and (str(
reaction.emoji) == "◀" or str(
reaction.emoji) == "▶")
try:
while True:
reaction, user = await self.bot.wait_for("reaction_add", timeout=60, check=check)
if str(reaction.emoji) == "▶":
if position_index < len(positions) - 1:
position_index += 1
if position_index == len(positions) - 1:
embed, image_file = self.utils.create_position_embed(game,
positions[position_index].board(),
end=True)
else:
embed, image_file = self.utils.create_position_embed(game,
positions[position_index].board())
dump_message = await dump_channel.send(file=image_file)
await message.edit(embed=embed.set_image(url=dump_message.attachments[0].url))
elif str(reaction.emoji) == "◀":
if position_index > 0:
position_index -= 1
embed, image_file = self.utils.create_position_embed(game, positions[position_index].board())
dump_message = await dump_channel.send(file=image_file)
await message.edit(embed=embed.set_image(url=dump_message.attachments[0].url))
await reaction.remove(user)
except asyncio.TimeoutError:
return
@chess.command(name="play")
async def play(self, ctx: Context, level: int, color: str = "white"):
dump_channel = self.bot.get_channel(int(os.getenv("DUMP_CHANNEL", "868392499348144180")))
WHITE = berserk.Color.WHITE
BLACK = berserk.Color.BLACK
level, color, headers, board = setup_game(ctx, level, color)
game_dict = self.utils.create_ai_game(level, color)
game_id = game_dict["id"]
message = await send_attachment_embed(ctx, dump_channel, **self.utils.render_board(board, headers))
looper = Loop_creator(self.bot, self.client, game_id)
looper.start_gameState_listener.start()
def check(game_id_check, last_move_check, len_moves):
return game_id_check == game_dict["id"]
def check_user_move(msg):
return msg.author.id == ctx.author.id and len(msg.content) == 4
if color == WHITE:
await ask_user_for_move(ctx, self.client, self.bot, check_user_move, board, game_id)
while True:
await edit_attachment_embed(message, dump_channel, **self.utils.render_board(board, headers))
game_id, last_move, len_moves = await self.bot.wait_for('move', check=check)
if len_moves % 2 == 0:
if color == WHITE:
await ask_user_for_move(ctx, self.client, self.bot, check_user_move, board, game_id)
else:
continue
else:
if color == WHITE:
continue
else:
await ask_user_for_move(ctx, self.client, self.bot, check_user_move, board, game_id)
class Loop_creator:
def __init__(self, bot, client, game_id):
self.game_id = game_id
self.client = client
self.bot = bot
@tasks.loop(count=1)
async def start_gameState_listener(self):
stream = self.client.bots.stream_game_state(self.game_id)
for event in stream:
if event['type'] == 'gameState':
last_move = event['moves'].split()[-1]
len_moves = len(event['moves'].split())
self.bot.dispatch("move", self.game_id, last_move, len_moves)
async def send_attachment_embed(ctx: Context, dump_channel: discord.TextChannel, embed, file):
dump_message = await dump_channel.send(file=file)
message = await ctx.send(embed=embed.set_image(url=dump_message.attachments[0].url))
return message
async def edit_attachment_embed(message: discord.Message, dump_channel: discord.TextChannel, embed, file):
dump_message = await dump_channel.send(file=file)
message = await message.edit(embed=embed.set_image(url=dump_message.attachments[0].url))
return message
async def ask_user_for_move(ctx, client, bot, check, board, game_id):
legal = False
while not legal:
await ctx.send(f"{ctx.author.mention} Make a move, You have 30 seconds to make a move")
message = await bot.wait_for("message", check=check, timeout=30.0)
content = message.content.lower()
try:
client.bots.make_move(game_id, content)
            board.push_uci(content)
            legal = True
        except Exception:
await ctx.send(f"{ctx.author.mention} Please make a legal move")
def setup_game(ctx, level: int, color: str):
if level < 0:
level = 0
elif level > 8:
level = 8
if color.lower() == "black":
color = berserk.Color.BLACK
headers = {
"White": "Stockfish",
"Black": ctx.author.name,
"WhiteElo": f"Level {level}",
"BlackElo": "Unkown",
"Event": "Unrated Versus AI",
"Result": "Unkown"
}
else:
color = berserk.Color.WHITE
headers = {
"White": ctx.author.name,
"Black": "Stockfish",
"WhiteElo": "Unkown",
"BlackElo": f"Level {level}",
"Event": "Unrated Versus AI",
"Result": "Unkown"
}
board = chess.Board()
return level, color, headers, board
def setup(bot):
bot.add_cog(Chess(bot))
|
136744
|
import time
def timeit(method):
""" Get the time it takes for a method to run.
Args:
method (function): The function to time.
Returns:
Method wrapped with an operation to time it.
"""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print('%r \n %2.2f ms' % (method, (te - ts) * 1000))
return result
return timed
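
if __name__ == '__main__':
    # Minimal usage sketch (the `work` function and `timings` dict are illustrative only).
    @timeit
    def work(n, **kwargs):
        return sum(range(n))

    work(100000)                     # prints the elapsed time in ms
    timings = {}
    work(100000, log_time=timings)   # stores the duration under 'WORK' instead of printing
    print(timings)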
|
136745
|
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('mt')
J.url = 'http://montana.gov'
|
136798
|
import invoke
import docs
import installers
import shims
namespace = invoke.Collection(docs, installers, shims)
|
136842
|
class QueryToken:
"""A placeholder token for dry-run query output"""
def __str__(self) -> str:
return "?"
def __repr__(self) -> str:
return "?"
|
136857
|
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import auc,precision_recall_curve,roc_curve,confusion_matrix
import os,sys
import pickle
def draw_ROC(y_true,y_pred):
fpr,tpr,_ = roc_curve(y_true,y_pred,pos_label=1)
area_mine = auc(fpr,tpr)
fig = plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % area_mine)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
def draw_PR(y_true,y_pred):
precision,recall,_ = precision_recall_curve(y_true,y_pred,pos_label=1)
area_PR = auc(recall,precision)
baseline = np.sum(np.array(y_true) == 1) / len(y_true)
plt.figure()
lw = 2
plt.plot(recall,precision, color='darkorange',
lw=lw, label='PR curve (area = %0.2f)' % area_PR)
plt.plot([0, 1], [baseline, baseline], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('PR curve example')
plt.legend(loc="lower right")
plt.show()
def seperateCNN():
input1 = keras.Input(shape=(10, 12, 1))
input2 = keras.Input(shape=(46, 12, 1))
x = layers.Conv2D(filters=16, kernel_size=(2, 12))(input1) # 9
x = layers.BatchNormalization()(x)
x = keras.activations.relu(x)
x = layers.Conv2D(filters=32, kernel_size=(2, 1))(x) # 8
x = layers.BatchNormalization()(x)
x = keras.activations.relu(x)
x = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(x) # 4
x = layers.Flatten()(x)
x = keras.Model(inputs=input1, outputs=x)
y = layers.Conv2D(filters=16, kernel_size=(15, 12))(input2) # 32
y = layers.BatchNormalization()(y)
y = keras.activations.relu(y)
y = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(y) # 16
y = layers.Conv2D(filters=32,kernel_size=(9,1))(y) # 8
y = layers.BatchNormalization()(y)
y = keras.activations.relu(y)
y = layers.MaxPool2D(pool_size=(2, 1),strides=(2,1))(y) # 4
y = layers.Flatten()(y)
y = keras.Model(inputs=input2,outputs=y)
combined = layers.concatenate([x.output,y.output])
z = layers.Dense(128,activation='relu')(combined)
z = layers.Dropout(0.2)(z)
z = layers.Dense(1,activation='sigmoid')(z)
model = keras.Model(inputs=[input1,input2],outputs=z)
return model
def pull_peptide_aaindex(dataset):
result = np.empty([len(dataset),10,12,1])
for i in range(len(dataset)):
result[i,:,:,:] = dataset[i][0]
return result
def pull_hla_aaindex(dataset):
result = np.empty([len(dataset),46,12,1])
for i in range(len(dataset)):
result[i,:,:,:] = dataset[i][1]
return result
def pull_label_aaindex(dataset):
col = [item[2] for item in dataset]
result = [0 if item == 'Negative' else 1 for item in col]
result = np.expand_dims(np.array(result),axis=1)
return result
def aaindex(peptide,after_pca):
amino = 'ARNDCQEGHILKMFPSTWYV-'
matrix = np.transpose(after_pca) # [12,21]
encoded = np.empty([len(peptide), 12]) # (seq_len,12)
for i in range(len(peptide)):
query = peptide[i]
if query == 'X': query = '-'
query = query.upper()
encoded[i, :] = matrix[:, amino.index(query)]
return encoded
def peptide_data_aaindex(peptide,after_pca): # return numpy array [10,12,1]
length = len(peptide)
if length == 10:
encode = aaindex(peptide,after_pca)
elif length == 9:
peptide = peptide[:5] + '-' + peptide[5:]
encode = aaindex(peptide,after_pca)
encode = encode.reshape(encode.shape[0], encode.shape[1], -1)
return encode
def dict_inventory(inventory):
dicA, dicB, dicC = {}, {}, {}
dic = {'A': dicA, 'B': dicB, 'C': dicC}
for hla in inventory:
type_ = hla[4] # A,B,C
first2 = hla[6:8] # 01
last2 = hla[8:] # 01
try:
dic[type_][first2].append(last2)
except KeyError:
dic[type_][first2] = []
dic[type_][first2].append(last2)
return dic
def rescue_unknown_hla(hla, dic_inventory):
type_ = hla[4]
first2 = hla[6:8]
last2 = hla[8:]
big_category = dic_inventory[type_]
#print(hla)
    if big_category.get(first2) is not None:
small_category = big_category.get(first2)
distance = [abs(int(last2) - int(i)) for i in small_category]
optimal = min(zip(small_category, distance), key=lambda x: x[1])[0]
return 'HLA-' + str(type_) + '*' + str(first2) + str(optimal)
else:
small_category = list(big_category.keys())
distance = [abs(int(first2) - int(i)) for i in small_category]
optimal = min(zip(small_category, distance), key=lambda x: x[1])[0]
return 'HLA-' + str(type_) + '*' + str(optimal) + str(big_category[optimal][0])
def hla_data_aaindex(hla_dic,hla_type,after_pca): # return numpy array [34,12,1]
try:
seq = hla_dic[hla_type]
except KeyError:
hla_type = rescue_unknown_hla(hla_type,dic_inventory)
seq = hla_dic[hla_type]
encode = aaindex(seq,after_pca)
encode = encode.reshape(encode.shape[0], encode.shape[1], -1)
return encode
def construct_aaindex(ori,hla_dic,after_pca):
series = []
for i in range(ori.shape[0]):
peptide = ori['peptide'].iloc[i]
hla_type = ori['HLA'].iloc[i]
immuno = np.array(ori['immunogenicity'].iloc[i]).reshape(1,-1) # [1,1]
encode_pep = peptide_data_aaindex(peptide,after_pca) # [10,12]
encode_hla = hla_data_aaindex(hla_dic,hla_type,after_pca) # [46,12]
series.append((encode_pep, encode_hla, immuno))
return series
def hla_df_to_dic(hla):
dic = {}
for i in range(hla.shape[0]):
col1 = hla['HLA'].iloc[i] # HLA allele
col2 = hla['pseudo'].iloc[i] # pseudo sequence
dic[col1] = col2
return dic
def retain_910(ori):
cond = []
for i in range(ori.shape[0]):
peptide = ori['peptide'].iloc[i]
if len(peptide) == 9 or len(peptide) == 10:
cond.append(True)
else:
cond.append(False)
data = ori.loc[cond]
data = data.set_index(pd.Index(np.arange(data.shape[0])))
return data
def draw_history(history):
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.legend()
# plot accuracy during training
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.legend()
plt.show()
if __name__ == '__main__':
os.chdir('/Users/ligk2e/Desktop/deepimmuno/reproduce')
after_pca = np.loadtxt('./data/after_pca.txt')
ori = pd.read_csv('./data/remove0123_sample100.csv')
ori = ori.sample(frac=1, replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
hla = pd.read_csv('./data/hla2paratopeTable_aligned.txt', sep='\t')
hla_dic = hla_df_to_dic(hla)
inventory = list(hla_dic.keys())
dic_inventory = dict_inventory(inventory)
dataset = construct_aaindex(ori, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
# let's do a train/validation split
bucket_roc = []
bucket_pr = []
for i in range(10):
array = np.arange(len(dataset))
train_index = np.random.choice(array,int(len(dataset)*0.9),replace=False)
valid_index = [item for item in array if item not in train_index]
input1_train = input1[train_index]
input1_valid = input1[valid_index]
input2_train = input2[train_index]
input2_valid = input2[valid_index]
label_train = label[train_index]
label_valid = label[valid_index]
cnn_model = seperateCNN()
cnn_model.compile(
loss=keras.losses.MeanSquaredError(),
optimizer=keras.optimizers.Adam(lr=0.0001),
metrics=['accuracy'])
callback_val = keras.callbacks.EarlyStopping(monitor='val_loss', patience=15,restore_best_weights=False)
callback_train = keras.callbacks.EarlyStopping(monitor='loss',patience=2,restore_best_weights=False)
history = cnn_model.fit(
x=[input1_train,input2_train], # feed a list into
y=label_train,
validation_data = ([input1_valid,input2_valid],label_valid),
batch_size=128,
epochs=200,
class_weight = {0:0.5,1:0.5}, # I have 20% positive and 80% negative in my training data
callbacks = [callback_val,callback_train])
valid = ori.loc[valid_index]
valid['cnn_regress'] = cnn_model.predict([input1_valid,input2_valid])
valid = valid.sort_values(by='cnn_regress',ascending=False).set_index(pd.Index(np.arange(valid.shape[0])))
y_true = [1 if not item == 'Negative' else 0 for item in valid['immunogenicity']]
y_pred = valid['cnn_regress']
fpr,tpr,_ = roc_curve(y_true,y_pred)
area = auc(fpr,tpr)
bucket_roc.append((fpr,tpr,_,area))
precision, recall, _ = precision_recall_curve(y_true, y_pred)
area = auc(recall, precision)
bucket_pr.append((precision, recall, _, area))
# ROC
bucket = bucket_roc
fig,ax = plt.subplots()
for i in range(10):
ax.plot(bucket[i][0],bucket[i][1],lw=0.5,label='CV(Fold={0}), AUC={1:.2f}'.format(i+1,bucket[i][3]))
ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic')
ax.legend(loc="lower right",fontsize=9)
plt.show()
# PR
bucket = bucket_pr
fig,ax = plt.subplots()
for i in range(10):
ax.plot(bucket[i][1],bucket[i][0],lw=0.5,label='CV(Fold={0}),AUC={1:.2f}'.format(i+1,bucket[i][3]))
#baseline = np.sum(np.array(y_true) == 1) / len(y_true) # 0.4735
baseline = 0.4735
ax.plot([0, 1], [baseline, baseline], color='navy', lw=2, linestyle='--')
ax.set_xlim([0.0, 1.0])
#ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_title('PR curve example')
ax.legend(loc="lower left",fontsize=8)
plt.show()
|
136858
|
RED, BLACK = True, False
class Node:
def __init__(self, key, value, color, size):
self.k = key
self.v = value
self.left, self.right = None, None
self.color = color
self.size = size
@staticmethod
def is_red(current):
if current is None: return False
return current.color == RED
@staticmethod
def is_black(current):
return not Node.is_red(current)
@staticmethod
def size(current):
        if current is None: return 0
return current.size
@staticmethod
def rotate_left(current):
assert current is not None
assert Node.is_red(current.right)
new_root = current.right
current.right = new_root.left
new_root.left = current
new_root.color = current.color
current.color = RED
new_root.size = current.size
current.size = 1 + Node.size(current.left) + Node.size(current.right)
return new_root
@staticmethod
def rotate_right(current):
assert current is not None
assert Node.is_red(current.left)
new_root = current.left
current.left = new_root.right
new_root.right = current
new_root.color = current.color
current.color = RED
new_root.size = current.size
current.size = 1 + Node.size(current.left) + Node.size(current.right)
return new_root
@staticmethod
def flip_colors(current):
assert current is not None
assert current.left is not None
assert current.right is not None
assert current.left.color != current.color
assert current.right.color != current.color
assert current.right.color == current.left.color
current.color = not current.color
current.left.color = not current.left.color
current.right.color = not current.right.color
    # make current.left or one of its children red
@staticmethod
def move_red_left(current):
assert current is not None
assert Node.is_red(current)
assert Node.is_black(current.left)
assert Node.is_black(current.left.left)
Node.flip_colors(current)
if Node.is_red(current.right.left):
current.right = Node.rotate_right(current.right)
            current = Node.rotate_left(current)
Node.flip_colors(current)
return current
# make current.right or one of its children red
@staticmethod
def move_red_right(current):
assert current is not None
assert Node.is_red(current)
assert Node.is_black(current.right)
assert Node.is_black(current.right.left)
Node.flip_colors(current)
if Node.is_red(current.left.left):
current = Node.rotate_right(current)
Node.flip_colors(current)
return current
@staticmethod
def balance(current):
# fix right leaning links
if Node.is_black(current.left) and Node.is_red(current.right):
current = Node.rotate_left(current)
if Node.is_red(current.left) and Node.is_red(current.left.left):
current = Node.rotate_right(current)
if Node.is_red(current.left) and Node.is_red(current.right):
Node.flip_colors(current)
# update size
current.size = 1 + Node.size(current.left) + Node.size(current.right)
return current
@staticmethod
def put(current, k, v):
if current is None:
return Node(k, v, RED, 1)
if k < current.k:
current.left = Node.put(current.left, k, v)
elif k > current.k:
current.right = Node.put(current.right, k, v)
else:
current.v = v
return Node.balance(current)
@staticmethod
def delete_min(current):
if current.left is None: return None
if Node.is_black(current.left) and Node.is_black(current.left.left):
current = Node.move_red_left(current)
current.left = Node.delete_min(current.left)
return Node.balance(current)
@staticmethod
def delete_max(current):
if Node.is_red(current.left):
current = Node.rotate_right(current)
if current.right is None:
return None
if Node.is_black(current.right) and Node.is_black(current.right.left):
current = Node.move_red_right(current)
current.right = Node.delete_max(current.right)
return Node.balance(current)
@staticmethod
def minimum(current):
if current.left is None:
return current
return Node.minimum(current.left)
@staticmethod
def maximum(current):
if current.right is None:
return current
        return Node.maximum(current.right)
@staticmethod
def delete(current, k):
if k < current.k:
if Node.is_black(current.left) and Node.is_black(current.left.left):
current = Node.move_red_left(current)
current.left = Node.delete(current.left, k)
else:
if Node.is_red(current.left):
current = Node.rotate_right(current)
if k == current.k and current.right is None:
return None
if Node.is_black(current.right) and Node.is_black(current.right.left):
current = Node.move_red_right(current)
if k == current.k:
new_minimum = Node.minimum(current.right)
current.k, current.v = new_minimum.k, new_minimum.v
current.right = Node.delete_min(current.right)
else:
current.right = Node.delete(current.right, k)
return Node.balance(current)
class RedBlackTree:
def __init__(self):
self.root = None
def is_empty(self):
return self.root is None
def size(self):
return Node.size(self.root)
def insert(self, key, value):
if key is None: return
self.root = Node.put(self.root, key, value)
self.root.color = BLACK
def delete(self, key):
if self.is_empty(): return
if self.contains(key) is False: return
if Node.is_black(self.root.left) and Node.is_black(self.root.right):
            self.root.color = RED
        self.root = Node.delete(self.root, key)
if self.is_empty() is False: self.root.color = BLACK
def delete_min(self):
        if self.is_empty(): return
        if Node.is_black(self.root.left) and Node.is_black(self.root.right):
            self.root.color = RED
self.root = Node.delete_min(self.root)
if self.is_empty() is False: self.root.color = BLACK
def delete_max(self):
        if self.is_empty(): return
        if Node.is_black(self.root.left) and Node.is_black(self.root.right):
            self.root.color = RED
self.root = Node.delete_max(self.root)
if self.is_empty() is False: self.root.color = BLACK
    def get(self, key):
if key is None: return None
current = self.root
while current is not None:
            if key > current.k:
                current = current.right
            elif key < current.k:
                current = current.left
            else:
                return current.v
return None
def contains(self, key):
return self.get(key) is not None
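
if __name__ == '__main__':
    # Quick sanity check of the tree above (illustrative usage only): insert a few
    # keys, look one up, delete one, and confirm the size bookkeeping.
    tree = RedBlackTree()
    for k, v in [(5, 'five'), (1, 'one'), (9, 'nine'), (3, 'three')]:
        tree.insert(k, v)
    assert tree.size() == 4
    assert tree.get(3) == 'three'
    tree.delete(1)
    assert not tree.contains(1)
    assert tree.size() == 3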
|
136867
|
import redis
class Redis:
@classmethod
def setex(cls, name, time, value):
r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
r.setex(name, time, value)
@classmethod
def get(cls, name):
r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
value = r.get(name)
if value is None:
return None
else:
            # Python 3 redis returns bytes by default, so decode to str before returning
return bytes.decode(value)
@classmethod
def delete(cls, name):
r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
        r.delete(name)  # The redis client can delete multiple names at once, but packing the args as *names previously caused the delete to fail
|
136880
|
import numpy
from PIL import Image
import scipy.ndimage
import sys
path = sys.argv[1]
region = sys.argv[2]
x_start = int(sys.argv[3])
y_start = int(sys.argv[4])
x_end = int(sys.argv[5])
y_end = int(sys.argv[6])
out_fname = sys.argv[7]
x_len = x_end - x_start
y_len = y_end - y_start
merged_im = numpy.zeros((x_len * 4096, y_len * 4096, 3), dtype='uint8')
for i in range(x_len):
    for j in range(y_len):
        fname = '{}/{}_{}_{}_sat.png'.format(path, region, x_start + i, y_start + j)
        merged_im[i*4096:(i+1)*4096, j*4096:(j+1)*4096, :] = numpy.array(Image.open(fname))[:, :, 0:3].swapaxes(0, 1)
Image.fromarray(merged_im.swapaxes(0, 1)).save(out_fname)
|
136954
|
import json
import time
from sqlalchemy import insert
from sqlalchemy.sql.expression import bindparam
from autocnet.io.db.model import Points, Measures
from autocnet.utils.serializers import object_hook
from autocnet.transformation.spatial import reproject, og2oc
def watch_insert_queue(queue, queue_name, counter_name, engine, stop_event, sleep_time=5):
"""
A worker process to be launched in a thread that will asynchronously insert or update
objects in the Session using dicts pulled from a redis queue. Using this queuing approach
many cluster jobs are able to push to the redis queue rapidly and then a single writer
process can push the data back to the database.
This function requires that the function called by the asynchronous cluster job INCR
(increment) the counter_name key in the redis cache. This counter is INCR (incremented)
by cluster jobs to track how many messages have been pushed to the queue (queue_name).
    This func then reads that many messages and DECR (decrements) the counter by that
    many messages. This way this function only reads when data is present and reads can occur
    asynchronously. This works because the cluster job pushes to the right side of the redis
    list and this function reads n-messages from the left side.
    This method uses the sqlalchemy core interface for performance reasons. Therefore, some
    munging of column names is used to ensure that the model to be processed matches the
database column names.
Parameters
----------
queue : obj
A Redis or StrictRedis connection instance
queue_name : str
The name of the queue to watch
counter_name : str
The name of the incrementing counter to watch.
    engine : obj
        A sqlalchemy engine.
    stop_event : obj
        A threading.Event object with set and is_set members. This is the
        poison pill that can be set to terminate the thread.
    sleep_time : int, optional
        Number of seconds to sleep between reads of the queue.
"""
while not stop_event.is_set():
# Determine the read length of objects to pull from the cache
read_length = int(queue.get(counter_name))
# Pull the objects from the cache
points = []
measures = []
# Pull the SRID dynamically from the model (database)
rect_srid = Points.rectangular_srid
lat_srid = Points.latitudinal_srid
for i in range(0, read_length):
msg = json.loads(queue.lpop(queue_name), object_hook=object_hook)
if isinstance(msg, dict):
# A NULL id is not allowable, so pop if a NULL ID exists
                if msg['id'] is None:
msg.pop('id', None)
# Since this avoids the ORM, need to map the table names manually
msg['pointType'] = msg['pointtype']
adjusted = msg['adjusted']
msg['adjusted'] = f'SRID={rect_srid};' + adjusted.wkt # Geometries go in as EWKT
msg['apriori'] = f'SRID={rect_srid};' + adjusted.wkt
lon_og, lat_og, _ = reproject([adjusted.x, adjusted.y, adjusted.z],
Points.semimajor_rad, Points.semiminor_rad,
'geocent', 'latlon')
lon, lat = og2oc(lon_og, lat_og, Points.semimajor_rad, Points.semiminor_rad)
msg['geom'] = f'SRID={lat_srid};Point({lon} {lat})'
# Measures are removed and manually added later
point_measures = msg.pop('measures', [])
if point_measures:
measures.append(point_measures)
points.append(msg)
# The message was successfully read, so atomically deincrement the counter
queue.decr(counter_name)
if points:
# Write the cached objects into the database
with engine.connect() as conn:
resp = conn.execute(
insert(Points.__table__).returning(Points.__table__.c.id),points
)
pointids = [i[0] for i in resp.all()]
# Measures are a list of lists. Associate each list with a pointid and then flatten the list
for i, measure_set in enumerate(measures):
for measure in measure_set:
measure['pointid'] = pointids[i]
measure.pop('id', None) # As above, remove the NULL id
# Remap field names because the ORM is NOT being used
measure['serialnumber'] = measure.pop('serial', None)
measure['measureType'] = measure.pop('measuretype')
measures = [measure for sublist in measures for measure in sublist]
conn.execute(
insert(Measures.__table__), measures)
time.sleep(sleep_time)
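
# Producer-side sketch of the queue convention described in the docstrings here
# (illustrative only): a cluster job RPUSHes a serialized message onto the right
# side of the list and INCRs the counter so the watcher knows how many to read.
def _push_message_example(queue, queue_name, counter_name, message_dict):
    queue.rpush(queue_name, json.dumps(message_dict))
    queue.incr(counter_name)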
def watch_update_queue(queue, queue_name, counter_name, engine, stop_event, sleep_time=5):
"""
A worker process to be launched in a thread that will asynchronously insert or update
objects in the Session using dicts pulled from a redis queue. Using this queuing approach
many cluster jobs are able to push to the redis queue rapidly and then a single writer
process can push the data back to the database.
This function requires that the function called by the asynchronous cluster job INCR
(increment) the counter_name key in the redis cache. This counter is INCR (incremented)
by cluster jobs to track how many messages have been pushed to the queue (queue_name).
    This func then reads that many messages and DECR (decrements) the counter by that
    many messages. This way this function only reads when data is present and reads can occur
    asynchronously. This works because the cluster job pushes to the right side of the redis
    list and this function reads n-messages from the left side.
    This method uses the sqlalchemy core interface for performance reasons. Therefore, some
    munging of column names is used to ensure that the model to be processed matches the
database column names.
Parameters
----------
queue : obj
A Redis or StrictRedis connection instance
queue_name : str
The name of the queue to watch
counter_name : str
The name of the incrementing counter to watch.
    engine : obj
        A sqlalchemy engine.
    stop_event : obj
        A threading.Event object with set and is_set members. This is the
        poison pill that can be set to terminate the thread.
    sleep_time : int, optional
        Number of seconds to sleep between reads of the queue.
"""
while not stop_event.is_set():
# Determine the read length of objects to pull from the cache
read_length = int(queue.get(counter_name))
# Pull the objects from the cache
measures = []
for i in range(0, read_length):
msg = json.loads(queue.lpop(queue_name), object_hook=object_hook)
if isinstance(msg, dict):
msg['_id'] = msg.pop('id', None) # id is reserved by sqlalchemy on insert/update, remapped below
measures.append(msg)
# The message was successfully read, so atomically deincrement the counter
queue.decr(counter_name)
# Write the updated measures to the db
if measures:
with engine.connect() as conn:
stmt = Measures.__table__.update().\
where(Measures.__table__.c.id == bindparam('_id')).\
values({'weight':bindparam('weight'),
'measureIgnore':bindparam('ignore'),
'templateMetric':bindparam('template_metric'),
'templateShift':bindparam('template_shift'),
'line': bindparam('line'),
'sample':bindparam('sample'),
'ChooserName':bindparam('choosername')})
resp = conn.execute(
stmt, measures
)
time.sleep(sleep_time)
|
136959
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
extensions = Extension(name='tree_collection',
sources=['py_wrapper.pyx',
'../src/ProblemParser.cc',
'../src/MinSqTree.cc',
'../src/newick.cc',
],
language='c++',
)
setup(
name="tree_collection",
cmdclass={'build_ext': build_ext},
ext_modules=[extensions]
)
|
136969
|
from .buyback_metrics import *
from .fei_metrics import *
from .fei_volume_metrics import *
from .pcv_metrics import *
|
136970
|
from __future__ import print_function
import os
import shutil
import sys
import tempfile
import unittest
import pandas as pd
import pyspark
import pytest
import sklearn.datasets
from sklearn.neighbors import KNeighborsClassifier
from mlflow.pyfunc import load_pyfunc, spark_udf
from mlflow.pyfunc.spark_model_cache import SparkModelCache
import mlflow.sklearn
def score_model_as_udf(model_path, run_id, pandas_df):
spark = pyspark.sql.SparkSession.builder \
.config(key="spark.python.worker.reuse", value=True) \
.master("local-cluster[2, 1, 1024]") \
.getOrCreate()
spark_df = spark.createDataFrame(pandas_df)
pyfunc_udf = spark_udf(spark, model_path, run_id, result_type="double")
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
return [x['prediction'] for x in new_df.collect()]
class TestSparkUDFs(unittest.TestCase):
def setUp(self):
self._tmp = tempfile.mkdtemp("mlflow-spark-test", dir="/tmp")
# NB: local-cluster mode actually sets up 2 executors, each given 1 core
# and 1024 MB of memory. This is the best way to simulate pickling/serialization
# behavior to ensure it will work as expected on a real cluster.
self.spark = pyspark.sql.SparkSession.builder \
.config(key="spark.python.worker.reuse", value=True) \
.master("local-cluster[2, 1, 1024]") \
.getOrCreate()
wine = sklearn.datasets.load_wine()
self._pandas_df = pd.DataFrame(wine.data[:, :11], columns=wine.feature_names[:11])
knn = KNeighborsClassifier()
knn.fit(self._pandas_df, wine.target)
self._model_path = os.path.join(self._tmp, "model")
mlflow.sklearn.save_model(knn, path=self._model_path)
self._predict = knn.predict(self._pandas_df)
def tearDown(self):
shutil.rmtree(self._tmp)
@pytest.mark.large
def test_spark_udf(self):
pandas_df = self._pandas_df
spark_df = self.spark.createDataFrame(pandas_df)
pyfunc_udf = spark_udf(self.spark, self._model_path, result_type="integer")
new_df = spark_df.withColumn("prediction", pyfunc_udf(*self._pandas_df.columns))
spark_results = new_df.collect()
# Compare against directly running the model.
direct_model = load_pyfunc(self._model_path)
pandas_results = direct_model.predict(pandas_df)
self.assertEqual(178, len(pandas_results))
self.assertEqual(178, len(spark_results))
for i in range(0, len(pandas_results)): # noqa
self.assertEqual(self._predict[i], pandas_results[i])
self.assertEqual(pandas_results[i], spark_results[i]['prediction'])
@pytest.mark.large
def test_model_cache(self):
archive_path = SparkModelCache.add_local_model(self.spark, self._model_path)
assert archive_path != self._model_path
# Ensure we can use the model locally.
local_model = SparkModelCache.get_or_load(archive_path)
assert isinstance(local_model, KNeighborsClassifier)
# Request the model on all executors, and see how many times we got cache hits.
def get_model(_):
model = SparkModelCache.get_or_load(archive_path)
assert isinstance(model, KNeighborsClassifier)
return SparkModelCache._cache_hits
# This will run 30 distinct tasks, and we expect most to reuse an already-loaded model.
# Note that we can't necessarily expect an even split, or even that there were only
# exactly 2 python processes launched, due to Spark and its mysterious ways, but we do
# expect significant reuse.
results = self.spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect()
# TODO(tomas): Looks like spark does not reuse python workers with python==3.x
assert sys.version[0] == '3' or max(results) > 10
# Running again should see no newly-loaded models.
results2 = self.spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect()
assert sys.version[0] == '3' or min(results2) > 0
|
136982
|
from secml.testing import CUnitTest
from secml.array import CArray
from secml.data import CDataset
from secml.ml.features import CPreProcess
from secml.optim.function import CFunction
from secml.figure import CFigure
from secml.core.constants import eps
class CClassifierTestCases(CUnitTest):
"""Unittests interface for CClassifier."""
def _check_df_scores(self, s, n_samples):
"""Checks for `decision_function` output.
Parameters
----------
s : CArray
Scores.
n_samples : int
Number of expected samples.
"""
self.assertEqual(type(s), CArray)
self.assertTrue(s.isdense)
self.assertEqual(1, s.ndim)
self.assertEqual((n_samples,), s.shape)
self.assertIsSubDtype(s.dtype, float)
def _check_classify_scores(self, l, s, n_samples, n_classes):
"""Checks for `classify` output.
Parameters
----------
l : CArray
Labels.
s : CArray
Scores.
n_samples : int
Number of expected samples.
n_classes : int
Number of expected classes.
"""
self.assertEqual(type(l), CArray)
self.assertEqual(type(s), CArray)
self.assertTrue(l.isdense)
self.assertTrue(s.isdense)
self.assertEqual(1, l.ndim)
self.assertEqual(2, s.ndim)
self.assertEqual((n_samples,), l.shape)
self.assertEqual((n_samples, n_classes), s.shape)
self.assertIsSubDtype(l.dtype, int)
self.assertIsSubDtype(s.dtype, float)
def _test_fun(self, clf, ds):
"""Test for `decision_function` and `predict`
Parameters
----------
clf : CClassifier
ds : CDataset
Returns
-------
scores : CArray
Classifier scores computed on a single point.
"""
self.logger.info(
"Test for decision_function() and predict() methods.")
if ds.issparse:
self.logger.info("Testing on sparse data...")
else:
self.logger.info("Testing on dense data...")
clf.fit(ds.X, ds.Y)
# we have to ensure at least 2d here, since _decision_function is not
# applying this change anymore (while decision_function does).
x = x_norm = ds.X.atleast_2d()
p = p_norm = ds.X[0, :].ravel().atleast_2d()
# Transform data if preprocess is defined
if clf.preprocess is not None:
x_norm = clf.preprocess.forward(x)
p_norm = clf.preprocess.forward(p)
# Testing decision_function on multiple points
df, df_priv = [], []
for y in range(ds.num_classes):
df.append(clf.decision_function(x, y=y))
df_priv.append(clf._forward(x_norm)[:, y].ravel())
self.logger.info(
"decision_function(x, y={:}): {:}".format(y, df[y]))
self.logger.info(
"_decision_function(x_norm, y={:}): {:}".format(y, df_priv[y]))
self._check_df_scores(df_priv[y], ds.num_samples)
self._check_df_scores(df[y], ds.num_samples)
self.assertFalse((df[y] != df_priv[y]).any())
# Testing predict on multiple points
labels, scores = clf.predict(
x, return_decision_function=True)
self.logger.info(
"predict(x):\nlabels: {:}\nscores: {:}".format(labels, scores))
self._check_classify_scores(
labels, scores, ds.num_samples, clf.n_classes)
# Comparing output of decision_function and predict
for y in range(ds.num_classes):
self.assertFalse((df[y] != scores[:, y].ravel()).any())
# Testing decision_function on single point
df, df_priv = [], []
for y in range(ds.num_classes):
df.append(clf.decision_function(p, y=y))
df_priv.append(clf._forward(p_norm)[:, y].ravel())
self.logger.info(
"decision_function(p, y={:}): {:}".format(y, df[y]))
self._check_df_scores(df[y], 1)
self.logger.info(
"_decision_function(p_norm, y={:}): {:}".format(y, df_priv[y]))
self._check_df_scores(df_priv[y], 1)
self.assertFalse((df[y] != df_priv[y]).any())
self.logger.info("Testing predict on single point")
labels, scores = clf.predict(
p, return_decision_function=True)
self.logger.info(
"predict(p):\nlabels: {:}\nscores: {:}".format(labels, scores))
self._check_classify_scores(labels, scores, 1, clf.n_classes)
# Comparing output of decision_function and predict
for y in range(ds.num_classes):
self.assertFalse((df[y] != scores[:, y].ravel()).any())
return scores
def _test_plot(self, clf, ds, levels=None):
"""Plot the decision function of a classifier."""
self.logger.info("Testing classifiers graphically")
# Preparation of the grid
fig = CFigure(width=8, height=4, fontsize=8)
clf.fit(ds.X, ds.Y)
fig.subplot(1, 2, 1)
fig.sp.plot_ds(ds)
fig.sp.plot_decision_regions(
clf, n_grid_points=50, grid_limits=ds.get_bounds())
fig.sp.title("Decision regions")
fig.subplot(1, 2, 2)
fig.sp.plot_ds(ds)
fig.sp.plot_fun(clf.decision_function, grid_limits=ds.get_bounds(),
levels=levels, y=1)
fig.sp.title("Discriminant function for y=1")
return fig
# TODO: consider moving at the CModule level!
def _test_gradient_numerical(self, clf, x, extra_classes=None,
th=1e-3, epsilon=eps, **grad_kwargs):
"""Test for clf.grad_f_x comparing to numerical gradient.
Parameters
----------
clf : CClassifier
x : CArray
extra_classes : None or list of int, optional
Any extra class which gradient wrt should be tested
th : float, optional
The threshold for the check with numerical gradient.
epsilon : float, optional
The epsilon to use for computing the numerical gradient.
grad_kwargs : kwargs
Any extra parameter for the gradient function.
Returns
-------
grads : list of CArray
A list with the gradients computed wrt each class.
"""
if 'y' in grad_kwargs:
raise ValueError("`y` cannot be passed to this unittest.")
if extra_classes is not None:
classes = clf.classes.append(extra_classes)
else:
classes = clf.classes
grads = []
for c in classes:
grad_kwargs['y'] = c # Appending class to test_f_x
# Analytical gradient
gradient = clf.grad_f_x(x, **grad_kwargs)
grads.append(gradient)
self.assertTrue(gradient.is_vector_like)
self.assertEqual(x.size, gradient.size)
# Numerical gradient
num_gradient = CFunction(
clf.decision_function).approx_fprime(x.todense(), epsilon, y=c)
# Compute the norm of the difference
error = (gradient - num_gradient).norm()
self.logger.info(
"Analytic grad wrt. class {:}:\n{:}".format(c, gradient))
self.logger.info(
"Numeric gradient wrt. class {:}:\n{:}".format(
c, num_gradient))
self.logger.info("norm(grad - num_grad): {:}".format(error))
self.assertLess(error, th)
self.assertIsSubDtype(gradient.dtype, float)
return grads
@staticmethod
def _create_preprocess_chain(pre_id_list, kwargs_list):
"""Creates a preprocessor with other preprocessors chained
and a list of the same preprocessors (not chained)"""
chain = None
pre_list = []
for i, pre_id in enumerate(pre_id_list):
chain = CPreProcess.create(
pre_id, preprocess=chain, **kwargs_list[i])
pre_list.append(CPreProcess.create(pre_id, **kwargs_list[i]))
return chain, pre_list
def _create_preprocess_test(self, ds, clf, pre_id_list, kwargs_list):
"""Fit 2 clf, one with internal preprocessor chain
and another using pre-transformed data.
Parameters
----------
ds : CDataset
clf : CClassifier
pre_id_list : list of str
This list should contain the class_id of each preprocessor
that should be part of the chain.
kwargs_list : list of dict
This list should contain a dictionary of extra parameter for
each preprocessor that should be part of the chain.
Returns
-------
pre1 : CPreProcess
The preprocessors chain.
data_pre : CArray
Data (ds.X) transformed using pre1.
clf_pre : CClassifier
The classifier with a copy the preprocessors chain inside,
trained on ds.
clf : CClassifier
The classifier without the preprocessors chain inside,
trained on data_pre.
"""
pre1 = CPreProcess.create_chain(pre_id_list, kwargs_list)
data_pre = pre1.fit_transform(ds.X)
pre2 = CPreProcess.create_chain(pre_id_list, kwargs_list)
clf_pre = clf.deepcopy()
clf_pre.preprocess = pre2
clf_pre.fit(ds.X, ds.Y)
clf.fit(data_pre, ds.Y)
return pre1, data_pre, clf_pre, clf
def _test_preprocess(self, ds, clf, pre_id_list, kwargs_list):
"""Test if clf with preprocessor inside returns the same
prediction of the clf trained on pre-transformed data.
Parameters
----------
ds : CDataset
clf : CClassifier
pre_id_list : list of str
This list should contain the class_id of each preprocessor
that should be part of the chain.
kwargs_list : list of dict
This list should contain a dictionary of extra parameter for
each preprocessor that should be part of the chain.
"""
pre, data_pre, clf_pre, clf = self._create_preprocess_test(
ds, clf, pre_id_list, kwargs_list)
self.logger.info(
"Testing {:} with preprocessor inside:\n{:}".format(
clf.__class__.__name__, clf_pre))
y1, score1 = clf_pre.predict(ds.X, return_decision_function=True)
y2, score2 = clf.predict(data_pre, return_decision_function=True)
self.assert_array_equal(y1, y2)
self.assert_array_almost_equal(score1, score2)
# The number of features of the clf with preprocess inside should be
# equal to the number of dataset features (so before preprocessing)
self.assertEqual(ds.num_features, clf_pre.n_features)
def _test_preprocess_grad(self, ds, clf, pre_id_list, kwargs_list,
extra_classes=None, check_numerical=True,
th=1e-3, epsilon=eps, **grad_kwargs):
"""Test if clf gradient with preprocessor inside is equal to the
gradient of the clf trained on pre-transformed data.
Also compare the gradient of the clf with preprocessor
inside with numerical gradient.
Parameters
----------
ds : CDataset
clf : CClassifier
pre_id_list : list of str
This list should contain the class_id of each preprocessor
that should be part of the chain.
kwargs_list : list of dict
This list should contain a dictionary of extra parameter for
each preprocessor that should be part of the chain.
extra_classes : None or list of int, optional
Any extra class which gradient wrt should be tested
check_numerical : bool, optional
If True, the gradient will be compared with
the numerical approximation.
th : float, optional
The threshold for the check with numerical gradient.
epsilon : float, optional
The epsilon to use for computing the numerical gradient.
grad_kwargs : kwargs
Any extra parameter for the gradient function.
"""
pre, data_pre, clf_pre, clf = self._create_preprocess_test(
ds, clf, pre_id_list, kwargs_list)
self.logger.info("Testing clf gradient with preprocessor "
"inside:\n{:}".format(clf_pre))
if 'y' in grad_kwargs:
raise ValueError("`y` cannot be passed to this unittest.")
if extra_classes is not None:
classes = clf.classes.append(extra_classes)
else:
classes = clf.classes
for c in classes:
self.logger.info(
"Testing grad wrt. class {:}".format(c))
# Grad of clf without preprocessor inside (using transformed data)
v_pre = data_pre[0, :]
clf_grad = clf.grad_f_x(v_pre, y=c, **grad_kwargs)
# Output of grad_f_x should be a float vector
self.assertEqual(1, clf_grad.ndim)
self.assertIsSubDtype(clf_grad.dtype, float)
# Gradient of clf with preprocessor inside
v = ds.X[0, :]
clf_pre_grad = clf_pre.grad_f_x(v, y=c, **grad_kwargs)
# Gradient of the preprocessor. Should be equal to the gradient
# of the clf with preprocessor inside
pre_grad = pre.gradient(v_pre, w=clf_grad)
# As clf_grad should be a float vector,
# output of gradient should be the same
self.assertEqual(1, pre_grad.ndim)
self.assertIsSubDtype(pre_grad.dtype, float)
self.assert_array_almost_equal(clf_pre_grad, pre_grad)
if check_numerical is True:
# Comparison with numerical gradient
self._test_gradient_numerical(
clf_pre, ds.X[0, :], extra_classes=extra_classes,
th=th, epsilon=epsilon, **grad_kwargs)
def _test_sparse_linear(self, ds, clf):
"""Test linear classifier operations on sparse data.
For linear classifiers, when training on sparse data, the weights
vector must be sparse. Also `grad_f_x` must return a sparse array.
Parameters
----------
ds : CDataset
clf : CClassifier
"""
self.logger.info("Testing {:} operations on sparse data.".format(
clf.__class__.__name__))
ds_sparse = ds.tosparse()
# Fitting on sparse data
clf.fit(ds_sparse.X, ds_sparse.Y)
# Resulting weights vector must be sparse
# self.assertTrue(clf.w.issparse)
# Predictions on dense and sparse data
x = ds.X[0, :]
x_sparse = ds_sparse.X[0, :]
y, s = clf.predict(
x, return_decision_function=True)
y_sparse, s_sparse = clf.predict(
x_sparse, return_decision_function=True)
self.assert_array_equal(y, y_sparse)
self.assert_array_equal(s, s_sparse)
# TODO: this is false. gradient can be dense...
# Gradient must be sparse if training data is sparse
# grad = clf.grad_f_x(x_sparse, y=0)
# self.assertTrue(grad.issparse)
# grad = clf.grad_f_x(x, y=0)
# self.assertTrue(grad.issparse)
if __name__ == '__main__':
CUnitTest.main()
|
136985
|
import unittest
from pyEpiabm.routine import AbstractPopulationFactory
class TestPopConfig(unittest.TestCase):
"""Test the 'ToyPopConfig' class.
"""
def test_make_pop(self):
"""Tests for a make population method.
"""
with self.assertRaises(NotImplementedError):
AbstractPopulationFactory.make_pop()
if __name__ == '__main__':
unittest.main()
|
137012
|
import argparse
import sys, os
parser = argparse.ArgumentParser()
parser.add_argument('pyqlabpath', help='path to PyQLab directory')
parser.add_argument('nbrSegments', type=int, help='nbrSegments')
args = parser.parse_args()
sys.path.append(args.pyqlabpath)
from Libraries import instrumentLib
if 'X6' not in instrumentLib.instrDict.keys():
sys.exit(1)
X6=instrumentLib['X6']
X6.nbrSegments = int(args.nbrSegments)
instrumentLib.write_to_file()
|
137037
|
from ray import tune
def get_config():
return {
# === Environment ===
"env": "Navigation",
"env_config": tune.grid_search(
[
{"deceleration_zones": None},
{"deceleration_zones": {"center": [[0.0, 0.0]], "decay": [2.0]}},
]
),
# === Replay Buffer ===
"buffer_size": int(1e4),
# === Optimization ===
        # Name of PyTorch optimizer class for parametrized policy
"optimizer": "Adam",
# Keyword arguments to be passed to the on-policy optimizer
"optimizer_options": {
"model": {"lr": 3e-4},
"value": {"lr": 3e-4},
"policy": {"lr": 3e-4},
},
# Clip gradient norms by this value
"max_grad_norm": 1e3,
# === Regularization ===
"kl_schedule": {
"initial_coeff": 0.2,
"desired_kl": 0.01,
"adaptation_coeff": 1.01,
"threshold": 1.0,
},
# === Network ===
# Size and activation of the fully connected networks computing the logits
# for the policy, value function and model. No layers means the component is
# linear in states and/or actions.
"module": {
"actor": {
"encoder": {
"units": (64, 64),
"activation": "ReLU",
"initializer_options": {"name": "xavier_uniform"},
},
"input_dependent_scale": False,
},
"critic": {
"target_vf": True,
"encoder": {
"units": (64, 64),
"activation": "ReLU",
"initializer_options": {"name": "xavier_uniform"},
},
},
"model": {
"residual": True,
"input_dependent_scale": False,
"encoder": {
"units": (64, 64),
"activation": "ReLU",
"delay_action": True,
"initializer_options": {"name": "xavier_uniform"},
},
},
},
# === RolloutWorker ===
"rollout_fragment_length": 1,
"batch_mode": "complete_episodes",
# === Trainer ===
"train_batch_size": 32,
"timesteps_per_iteration": 200,
# === Exploration ===
"exploration_config": {"pure_exploration_steps": 200},
# === Evaluation ===
"evaluation_interval": 5,
"evaluation_num_episodes": 5,
}
|
137040
|
def test_bounds():
from pyvmmonitor_core.math_utils import Bounds
bounds = Bounds()
assert not bounds.is_valid()
bounds.add_point((10, 10))
assert bounds.is_valid()
assert bounds.width == 0
assert bounds.height == 0
bounds.add_point((0, 0))
assert bounds.nodes == ((0, 0), (0, 10), (10, 10), (10, 0))
assert bounds.width == 10
assert bounds.height == 10
assert bounds.center == (5, 5)
x, y, w, h = bounds
assert (x, y, w, h) == (0, 0, 10, 10)
|
137084
|
import os
from unittest import mock
import pytest
from iotedgedev.envvars import EnvVars
from iotedgedev.output import Output
pytestmark = pytest.mark.unit
def test_get_envvar__valid():
envvars = EnvVars(Output())
deployment_template = envvars.get_envvar("DEPLOYMENT_CONFIG_TEMPLATE_FILE")
assert deployment_template is not None
def test_get_envvar__invalid():
envvars = EnvVars(Output())
testerval = envvars.get_envvar("TESTER")
assert not testerval
def test_load_valid():
envvars = EnvVars(Output())
envvars.load()
assert envvars.DEPLOYMENT_CONFIG_TEMPLATE_FILE == "deployment.template.json"
def test_verify_envvar_has_val__valid():
envvars = EnvVars(Output())
envvars.load()
result = envvars.verify_envvar_has_val("DEPLOYMENT_CONFIG_TEMPLATE_FILE", envvars.DEPLOYMENT_CONFIG_TEMPLATE_FILE)
assert not result
def test_get_envvar_key_if_val__valid():
envvars = EnvVars(Output())
assert envvars.get_envvar_key_if_val("DEPLOYMENT_CONFIG_TEMPLATE_FILE")
def test_get_envvar_key_if_val__invalid():
envvars = EnvVars(Output())
assert not envvars.get_envvar_key_if_val("TESTER")
def test_set_envvar():
envvars = EnvVars(Output())
registry_server = envvars.get_envvar("DEPLOYMENT_CONFIG_TEMPLATE_FILE")
envvars.set_envvar("DEPLOYMENT_CONFIG_TEMPLATE_FILE", "deployment.template_new.json")
new_registry_server = envvars.get_envvar("DEPLOYMENT_CONFIG_TEMPLATE_FILE")
assert new_registry_server == "deployment.template_new.json"
envvars.set_envvar("DEPLOYMENT_CONFIG_TEMPLATE_FILE", registry_server)
def test_envvar_clean():
EnvVars(Output())
envvar_clean_name = u"IOTEDGEDEV_ENVVAR_CLEAN_TEST"
os.environ[envvar_clean_name] = u"test unicode string"
@pytest.mark.parametrize(
"command, command_list",
[
("solution new test_solution", ["init", "e2e", "solution new", "new", "simulator stop"]),
("solution new", ["init", "e2e", "solution new", "new", "simulator stop"]),
("", ["init", "e2e", "", "new", "simulator stop"]),
]
)
def test_in_command_list_true(command, command_list):
envvars = EnvVars(Output())
assert envvars.in_command_list(command, command_list)
@pytest.mark.parametrize(
"command, command_list",
[
("solution add filtermodule", ["init", "e2e", "solution new", "new", "simulator stop"]),
("solution addotherstuff filtermodule", ["init", "e2e", "solution add", "new", "simulator stop"]),
("", ["init", "e2e", "solution new", "new", "simulator stop"]),
("solution new test_solution", ["init", "e2e", "", "new", "simulator stop"])
]
)
def test_in_command_list_false(command, command_list):
envvars = EnvVars(Output())
assert not envvars.in_command_list(command, command_list)
@pytest.mark.parametrize(
"command",
[
"iothub setup --update-dotenv",
""
]
)
def test_is_terse_command_true(command):
envvars = EnvVars(Output())
assert envvars.is_terse_command(command)
def test_is_terse_command_false():
envvars = EnvVars(Output())
assert not envvars.is_terse_command("solution add")
def test_default_container_registry_server_key_exists():
envvars = EnvVars(Output())
envvars.load()
assert "CONTAINER_REGISTRY_SERVER" in os.environ
@pytest.mark.parametrize(
"envvar",
[
"CONTAINER_REGISTRY_SERVER",
"CONTAINER_REGISTRY_USERNAME",
"CONTAINER_REGISTRY_PASSWORD"
]
)
def test_default_envvar_value_exists(envvar):
envvars = EnvVars(Output())
server = envvars.get_envvar(envvar)
assert server is not None
def test_container_registry_server_key_missing_sys_exit():
envvars = EnvVars(Output())
with pytest.raises(ValueError):
envvars.get_envvar("CONTAINER_REGISTRY_SERVER_UNITTEST", required=True)
@pytest.mark.parametrize(
"envvar",
[
"CONTAINER_REGISTRY_SERVER",
"CONTAINER_REGISTRY_USERNAME",
"CONTAINER_REGISTRY_PASSWORD"
]
)
def test_unique_envvar_tokens(envvar):
unique = set()
    envvar_length = len(envvar)
is_unique = True
envvars = EnvVars(Output())
envvars.load()
for key in os.environ:
if key.startswith(envvar):
            token = key[envvar_length:]
if token not in unique:
unique.add(token)
else:
is_unique = False
assert is_unique
@mock.patch.dict(os.environ, {
"CONTAINER_REGISTRY_SERVER_UNITTEST": "unittest.azurecr.io",
"CONTAINER_REGISTRY_USERNAME_UNITTEST": "username",
"CONTAINER_REGISTRY_PASSWORD_UNITTEST": "password"
})
def test_additional_container_registry_map_is_set_from_environ():
envvars = EnvVars(Output())
envvars.load()
assert len(envvars.CONTAINER_REGISTRY_MAP) == 2
assert 'UNITTEST' in envvars.CONTAINER_REGISTRY_MAP.keys()
assert envvars.CONTAINER_REGISTRY_MAP['UNITTEST'].server == 'unittest.azurecr.io'
assert envvars.CONTAINER_REGISTRY_MAP['UNITTEST'].username == 'username'
assert envvars.CONTAINER_REGISTRY_MAP['UNITTEST'].password == 'password'
|
137176
|
from ctypes import c_int, c_char_p, c_void_p, CFUNCTYPE
from ctypes import POINTER as _P
from .dll import _bind, SDLFunc, AttributeDict
__all__ = [
# Defines
"SDL_MAX_LOG_MESSAGE",
# Enums
"SDL_LogCategory",
"SDL_LOG_CATEGORY_APPLICATION",
"SDL_LOG_CATEGORY_ERROR", "SDL_LOG_CATEGORY_ASSERT",
"SDL_LOG_CATEGORY_SYSTEM", "SDL_LOG_CATEGORY_AUDIO",
"SDL_LOG_CATEGORY_VIDEO", "SDL_LOG_CATEGORY_RENDER",
"SDL_LOG_CATEGORY_INPUT", "SDL_LOG_CATEGORY_TEST",
"SDL_LOG_CATEGORY_RESERVED1", "SDL_LOG_CATEGORY_RESERVED2",
"SDL_LOG_CATEGORY_RESERVED3", "SDL_LOG_CATEGORY_RESERVED4",
"SDL_LOG_CATEGORY_RESERVED5", "SDL_LOG_CATEGORY_RESERVED6",
"SDL_LOG_CATEGORY_RESERVED7", "SDL_LOG_CATEGORY_RESERVED8",
"SDL_LOG_CATEGORY_RESERVED9", "SDL_LOG_CATEGORY_RESERVED10",
"SDL_LOG_CATEGORY_CUSTOM",
"SDL_LogPriority",
"SDL_LOG_PRIORITY_VERBOSE",
"SDL_LOG_PRIORITY_DEBUG", "SDL_LOG_PRIORITY_INFO",
"SDL_LOG_PRIORITY_WARN", "SDL_LOG_PRIORITY_ERROR",
"SDL_LOG_PRIORITY_CRITICAL", "SDL_NUM_LOG_PRIORITIES",
# Callback Functions
"SDL_LogOutputFunction",
]
# Constants & enums
SDL_MAX_LOG_MESSAGE = 4096
SDL_LogCategory = c_int
SDL_LOG_CATEGORY_APPLICATION = 0
SDL_LOG_CATEGORY_ERROR = 1
SDL_LOG_CATEGORY_ASSERT = 2
SDL_LOG_CATEGORY_SYSTEM = 3
SDL_LOG_CATEGORY_AUDIO = 4
SDL_LOG_CATEGORY_VIDEO = 5
SDL_LOG_CATEGORY_RENDER = 6
SDL_LOG_CATEGORY_INPUT = 7
SDL_LOG_CATEGORY_TEST = 8
SDL_LOG_CATEGORY_RESERVED1 = 9
SDL_LOG_CATEGORY_RESERVED2 = 10
SDL_LOG_CATEGORY_RESERVED3 = 11
SDL_LOG_CATEGORY_RESERVED4 = 12
SDL_LOG_CATEGORY_RESERVED5 = 13
SDL_LOG_CATEGORY_RESERVED6 = 14
SDL_LOG_CATEGORY_RESERVED7 = 15
SDL_LOG_CATEGORY_RESERVED8 = 16
SDL_LOG_CATEGORY_RESERVED9 = 17
SDL_LOG_CATEGORY_RESERVED10 = 18
SDL_LOG_CATEGORY_CUSTOM = 19
SDL_LogPriority = c_int
SDL_LOG_PRIORITY_VERBOSE = 1
SDL_LOG_PRIORITY_DEBUG = 2
SDL_LOG_PRIORITY_INFO = 3
SDL_LOG_PRIORITY_WARN = 4
SDL_LOG_PRIORITY_ERROR = 5
SDL_LOG_PRIORITY_CRITICAL = 6
SDL_NUM_LOG_PRIORITIES = 7
# Callback function definitions
SDL_LogOutputFunction = CFUNCTYPE(None, c_void_p, c_int, SDL_LogPriority, c_char_p)
# Raw ctypes function definitions
# TODO: do we want SDL_LogMessageV?
_funcdefs = [
SDLFunc("SDL_LogSetAllPriority", [SDL_LogPriority]),
SDLFunc("SDL_LogSetPriority", [c_int, SDL_LogPriority]),
SDLFunc("SDL_LogGetPriority", [c_int], SDL_LogPriority),
SDLFunc("SDL_LogResetPriorities"),
SDLFunc("SDL_Log", [c_char_p]),
SDLFunc("SDL_LogVerbose", [c_int, c_char_p]),
SDLFunc("SDL_LogDebug", [c_int, c_char_p]),
SDLFunc("SDL_LogInfo", [c_int, c_char_p]),
SDLFunc("SDL_LogWarn", [c_int, c_char_p]),
SDLFunc("SDL_LogError", [c_int, c_char_p]),
SDLFunc("SDL_LogCritical", [c_int, c_char_p]),
SDLFunc("SDL_LogMessage", [c_int, SDL_LogPriority, c_char_p]),
SDLFunc("SDL_LogGetOutputFunction", [_P(SDL_LogOutputFunction), c_void_p]),
SDLFunc("SDL_LogSetOutputFunction", [SDL_LogOutputFunction, c_void_p]),
]
_ctypes = AttributeDict()
for f in _funcdefs:
_ctypes[f.name] = _bind(f.name, f.args, f.returns, f.added)
__all__.append(f.name) # Add all bound functions to module namespace
# Aliases for ctypes bindings
SDL_LogSetAllPriority = _ctypes["SDL_LogSetAllPriority"]
SDL_LogSetPriority = _ctypes["SDL_LogSetPriority"]
SDL_LogGetPriority = _ctypes["SDL_LogGetPriority"]
SDL_LogResetPriorities = _ctypes["SDL_LogResetPriorities"]
SDL_Log = _ctypes["SDL_Log"]
SDL_LogVerbose = _ctypes["SDL_LogVerbose"]
SDL_LogDebug = _ctypes["SDL_LogDebug"]
SDL_LogInfo = _ctypes["SDL_LogInfo"]
SDL_LogWarn = _ctypes["SDL_LogWarn"]
SDL_LogError = _ctypes["SDL_LogError"]
SDL_LogCritical = _ctypes["SDL_LogCritical"]
SDL_LogMessage = _ctypes["SDL_LogMessage"]
SDL_LogGetOutputFunction = _ctypes["SDL_LogGetOutputFunction"]
SDL_LogSetOutputFunction = _ctypes["SDL_LogSetOutputFunction"]
|
137190
|
def lazyproperty(fn):
attr_name = '__' + fn.__name__
@property
def _lazyprop(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazyprop
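# A minimal usage sketch (illustrative only; the Circle class below is a made-up
# example, not part of the original module). The decorated method runs once and
# its result is cached on the instance under '__<name>', so later accesses skip
# the computation.
if __name__ == '__main__':
    class Circle:
        def __init__(self, radius):
            self.radius = radius
        @lazyproperty
        def area(self):
            print('computing area')  # printed only on the first access
            return 3.14159 * self.radius ** 2
    c = Circle(2.0)
    print(c.area)  # computes and caches the value
    print(c.area)  # returned from the cached attribute, no recomputation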
|
137204
|
from kubernetes import client
from kubernetes.client.rest import ApiException
from .load_kube_config import kubeConfig
kubeConfig.load_kube_config()
apps = client.AppsV1Api()
class K8sStatefulSet:
def get_sts(ns, logger):
try:
if ns != 'all':
                logger.info("Fetching {} namespace statefulSets data...".format(ns))
namespace = ns
statefulsets = apps.list_namespaced_stateful_set(namespace, timeout_seconds=10)
else:
                logger.info("Fetching all namespace statefulSets data...")
statefulsets = apps.list_stateful_set_for_all_namespaces(timeout_seconds=10)
return statefulsets
except ApiException as e:
logger.warning("Exception when calling AppsV1Api->list_namespaced_stateful_set: %s\n" % e)
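# Minimal usage sketch (illustrative only; assumes a reachable cluster via the
# kube config loaded above and a standard logging.Logger instance):
#
#     sts = K8sStatefulSet.get_sts('default', logger)   # a single namespace
#     sts_all = K8sStatefulSet.get_sts('all', logger)    # every namespace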
|
137210
|
import pandas as pd
import torch
from typing import Callable, List
from torch.utils.data import TensorDataset
class CsvDataset(TensorDataset):
    data: pd.DataFrame
y_cols: List
x_cols: List
transform: Callable
test_fraction: float = 0.0
train: bool
def __init__(self, file_path: str, y_cols: List, x_cols: List, train: bool = True,
transform: Callable = lambda: None, test_fraction: float = 0.0, nrows: int = None):
self.__dict__.update(**vars())
self.data = pd.read_csv(**{'filepath_or_buffer': file_path, 'nrows': nrows})
data_length = len(self.data)
self.test_size = int(data_length * self.test_fraction)
self.train_size = data_length - self.test_size
self.train_data = self.data.iloc[0:self.train_size]
self.test_data = self.data.iloc[self.train_size:]
if train:
x, y = torch.tensor(self.train_data[self.x_cols].values), torch.tensor(self.train_data[self.y_cols].values)
else:
x, y = torch.tensor(self.test_data[self.x_cols].values), torch.tensor(self.test_data[self.y_cols].values)
super(CsvDataset, self).__init__(x, y)
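# Minimal usage sketch (illustrative only; the tiny CSV below is generated on the
# fly just for the demo, and the column names are made up):
if __name__ == '__main__':
    import os
    import tempfile
    csv_text = "x1,x2,y\n1,2,3\n4,5,9\n6,7,13\n8,9,17\n"
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
        f.write(csv_text)
        path = f.name
    try:
        train_ds = CsvDataset(path, y_cols=['y'], x_cols=['x1', 'x2'], train=True, test_fraction=0.25)
        x0, y0 = train_ds[0]
        print(len(train_ds), x0, y0)  # 3 training rows; tensors for the first one
    finally:
        os.remove(path)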
|
137213
|
from .base_menu import Menu
from .tasks import buyAirtime
class Airtime(Menu):
def get_phone_number(self): # 10
if self.user_response == '1':
self.session["phone_number"] = self.phone_number
menu_text = "Buy Airtime\nPlease Enter Amount(Ksh)"
self.session['level'] = 12
return self.ussd_proceed(menu_text)
if self.user_response == '2':
menu_text = "Buy Airtime\nPlease enter phone number as (+2547XXXXXXXX)"
self.session['level'] = 11
return self.ussd_proceed(menu_text)
return self.home()
def another_number(self): # 11
if not self.user_response.startswith("+"):
menu_text = "Buy Airtime\nPlease enter a valid phone number as (+2547XXXXXXXX)"
return self.ussd_proceed(menu_text)
self.session['phone_number'] = self.phone_number
menu_text = "Buy Airtime\nPlease Enter Amount(Ksh)"
self.session['level'] = 12
return self.ussd_proceed(menu_text)
def get_amount(self, amount=None): # level 12
if int(self.user_response) < 5 and amount is None:
menu_text = "Buy Airtime\nYou can only buy airtime above Ksh 5.00. Please enter amount"
return self.ussd_proceed(menu_text)
if amount is None:
self.session['amount'] = int(self.user_response)
self.session['level'] = 13
menu_text = "Purchase Ksh{:.2f} worth of airtime for {}\n".format(self.session.get('amount'),
self.session.get("phone_number"))
menu_text += "1.Confirm\n2.Cancel"
return self.ussd_proceed(menu_text)
def confirm(self): # 13
if self.user_response == "1":
menu_text = "Please wait as we load your account."
buyAirtime.apply_async(
kwargs={'phone_number': self.session['phone_number'], 'amount': self.session['amount'],
'account_phoneNumber': self.user.phone_number})
return self.ussd_end(menu_text)
if self.user_response == "2":
menu_text = "Thank you for doing business with us"
return self.ussd_end(menu_text)
return self.get_amount(amount=True)
def execute(self):
level = self.session.get('level')
menu = {
10: self.get_phone_number,
11: self.another_number,
12: self.get_amount,
13: self.confirm
}
return menu.get(level, self.home)()
|
137223
|
from __future__ import absolute_import, print_function, division
import copy
import unittest
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import numpy
from six.moves import xrange
import theano
import theano.sandbox.cuda as cuda_ndarray
from theano.tensor.basic import _allclose
from theano.tests import unittest_tools as utt
if not cuda_ndarray.cuda_available:
raise SkipTest('Optional package cuda disabled')
def advantage(cpu_dt, gpu_dt):
"""
Return ratio of cpu_dt / gpu_dt, which must be non-negative numbers.
If both arguments are zero, return NaN.
If only gpu_dt is zero, return Inf.
"""
assert gpu_dt >= 0 and cpu_dt >= 0
if gpu_dt == 0 and cpu_dt == 0:
return numpy.nan
elif gpu_dt == 0:
return numpy.inf
else:
return cpu_dt / gpu_dt
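# For instance (illustrative values only): advantage(2.0, 1.0) == 2.0 means the
# GPU was twice as fast, advantage(0.0, 0.0) is nan, and advantage(1.0, 0.0) is inf.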
def test_host_to_device():
# print >>sys.stdout, 'starting test_host_to_dev'
for shape in ((), (3,), (2, 3), (3, 4, 5, 6)):
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
c = numpy.asarray(b)
assert numpy.all(a == c)
# test with float32 dtype
d = numpy.asarray(b, dtype='float32')
assert numpy.all(a == d)
# test with not float32 dtype
try:
numpy.asarray(b, dtype='int8')
assert False
except TypeError:
pass
def test_add_iadd_idiv():
for shapes in ([(5, 5), (5, 1)],
[(5, 5), (1, 5)],
(), (0,), (3,), (2, 3),
(1, 10000000), (10000, 1000), (1000000, 10),
(4100, 33, 34), (33, 4100, 34), (33, 34, 4100),
(4100, 33, 3, 6), (33, 4100, 3, 6), (33, 3, 4100, 6), (33, 3, 6, 4100),
(4100, 3, 34, 6), (3, 4100, 34, 6), (3, 34, 4100, 6), (3, 34, 6, 4100),
(4100, 3, 4, 36), (3, 4100, 4, 36), (3, 4, 4100, 36), (3, 4, 36, 4100),
(0, 0, 0, 0, 0),
(3, 34, 35, 36, 37),
(33, 34, 3, 36, 37),
(33, 34, 35, 36, 3),
(0, 0, 0, 0, 0, 0),
(3, 34, 35, 36, 37, 2),
(33, 34, 3, 36, 37, 2),
(33, 34, 35, 36, 3, 2),
(3, 4, 5, 6, 7, 1025),
(3, 4, 5, 6, 1025, 7),
(3, 4, 5, 1025, 6, 7),
(3, 4, 1025, 5, 6, 7),
(3, 1025, 4, 5, 6, 7),
(1025, 3, 4, 5, 6, 7),
):
if isinstance(shapes, tuple):
shape = shapes
shape2 = shapes
a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a0_orig = a0.copy()
a1 = a0.copy()
assert numpy.allclose(a0, a1)
else:
shape = shapes[0]
shape2 = shapes[1]
a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a0_orig = a0.copy()
a1 = theano._asarray(numpy.random.rand(*shape2), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1)
assert numpy.allclose(a0, numpy.asarray(b0))
assert numpy.allclose(a1, numpy.asarray(b1))
        # add doesn't support strides
if shape == shape2:
bsum = b0 + b1
bsum = b0 + b1
asum = a0 + a1
asum = a0 + a1
# print shape, 'adding ', a0.size, 'cpu', cpu_dt, 'advantage', advantage(cpu_dt, gpu_dt)
assert numpy.allclose(asum, numpy.asarray(bsum))
# test not contiguous version.
# should raise not implemented.
a0 = a0_orig.copy()
b0 = cuda_ndarray.CudaNdarray(a0)
if len(shape) == 0:
continue
elif len(shape) == 1:
_b = b1[::-1]
elif len(shape) == 2:
_b = b1[::, ::-1]
elif len(shape) == 3:
_b = b1[::, ::, ::-1]
elif len(shape) == 4:
_b = b1[::, ::, ::, ::-1]
elif len(shape) == 5:
_b = b1[::, ::, ::, ::, ::-1]
elif len(shape) == 6:
_b = b1[::, ::, ::, ::, ::, ::-1]
else:
raise Exception("You need to modify this case!")
        # TODO: b0[...,::-1] doesn't work
# test inplace version
b0 += b1
a0 += a1
# print shape, 'adding inplace', a0.size, 'cpu', cpu_dt, 'advantage', advantage(cpu_dt, gpu_dt)
assert numpy.allclose(a0, numpy.asarray(b0))
assert numpy.allclose(a0, a0_orig + a1)
b0 /= b1
a0 /= a1
assert numpy.allclose(a0, numpy.asarray(b0))
assert numpy.allclose(a0, (a0_orig + a1) / a1)
# test inplace version
# for not contiguous input
b0 += _b
a0 += a1[..., ::-1]
assert numpy.allclose(a0, numpy.asarray(b0))
assert numpy.allclose(a0, (a0_orig + a1) / a1 + a1[..., ::-1])
b0 /= _b
a0 /= a1[..., ::-1]
assert numpy.allclose(a0, numpy.asarray(b0))
assert numpy.allclose(a0, ((a0_orig + a1) / a1 +
a1[..., ::-1]) / a1[..., ::-1])
def test_exp():
# print >>sys.stdout, 'starting test_exp'
for shape in ((), (3,), (2, 3),
(1, 10000000), (10, 1000000),
(100, 100000), (1000, 10000), (10000, 1000)):
a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a1 = a0.copy()
b0 = cuda_ndarray.CudaNdarray(a0)
cuda_ndarray.CudaNdarray(a1)
bsum = b0.exp()
asum = numpy.exp(a1)
# print shape, 'adding ', a0.size, 'cpu', cpu_dt, 'advantage', advantage(cpu_dt, gpu_dt)
# c = numpy.asarray(b0+b1)
if asum.shape:
assert numpy.allclose(asum, numpy.asarray(bsum))
def test_copy():
# print >>sys.stdout, 'starting test_copy'
shape = (500, 499)
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
# print >>sys.stdout, '.. creating device object'
b = cuda_ndarray.CudaNdarray(a)
# print >>sys.stdout, '.. copy'
c = copy.copy(b)
# print >>sys.stdout, '.. deepcopy'
d = copy.deepcopy(b)
# print >>sys.stdout, '.. comparisons'
assert numpy.allclose(a, numpy.asarray(b))
assert numpy.allclose(a, numpy.asarray(c))
assert numpy.allclose(a, numpy.asarray(d))
b += b
assert numpy.allclose(a + a, numpy.asarray(b))
assert numpy.allclose(a + a, numpy.asarray(c))
assert numpy.allclose(a, numpy.asarray(d))
def test_nvcc_bug():
"""
    The function k_elemwise_unary_rowmajor_copy (used by cuda.copy()) in
    cuda_ndarray.cu is not compiled correctly by nvcc 3.0 and 3.1 beta. We found
    a workaround, so it should work correctly. Without the workaround, this
    test fails.
"""
shape = (5, 4)
aa = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a = aa[::, ::-1]
b = cuda_ndarray.CudaNdarray(aa)[::, ::-1]
c = copy.copy(b)
d = copy.deepcopy(b)
assert numpy.allclose(a, numpy.asarray(b))
assert numpy.allclose(a, numpy.asarray(c))
assert numpy.allclose(a, numpy.asarray(d))
b += b
assert numpy.allclose(a + a, numpy.asarray(b))
assert numpy.allclose(a + a, numpy.asarray(c))
assert numpy.allclose(a, numpy.asarray(d))
class test_DimShuffle(unittest.TestCase):
def test_dimshuffle(self):
utt.seed_rng()
rng = numpy.random.RandomState(utt.fetch_seed())
# 2d -> 0d
a = theano._asarray(rng.randn(1, 1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(numpy.transpose(a),
cuda_ndarray.dimshuffle(b, ()))
        # Test when we drop an axis that doesn't have shape 1
a = theano._asarray(rng.randn(2, 1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
self.assertRaises(ValueError, cuda_ndarray.dimshuffle, b, ())
        # Test that we can't take a dimension multiple times
a = theano._asarray(rng.randn(2, 1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
self.assertRaises(ValueError, cuda_ndarray.dimshuffle, b, (1, 1))
# 1d
a = theano._asarray(rng.randn(3,), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(numpy.transpose(a),
cuda_ndarray.dimshuffle(b, (0,)))
assert numpy.allclose(a[None, :, None],
cuda_ndarray.dimshuffle(b, (-1, 0, -1)))
# 2d
a = theano._asarray(rng.randn(3, 11), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(numpy.transpose(a),
cuda_ndarray.dimshuffle(b, (1, 0)))
assert numpy.allclose(numpy.transpose(a)[None, :, None, :, None],
cuda_ndarray.dimshuffle(b, (-1, 1, -1, 0, -1)))
# 2d -> 1d
a = theano._asarray(rng.randn(1, 11), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(a[:],
cuda_ndarray.dimshuffle(b, (1,)))
a = theano._asarray(rng.randn(11, 1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(a.reshape((11,)),
cuda_ndarray.dimshuffle(b, (0,)))
# 3d
a = theano._asarray(rng.randn(3, 4, 5), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(a, cuda_ndarray.dimshuffle(b, (0, 1, 2)))
assert numpy.allclose(numpy.swapaxes(a, 0, 1),
cuda_ndarray.dimshuffle(b, (1, 0, 2)))
assert numpy.allclose(numpy.swapaxes(a, 0, 2),
cuda_ndarray.dimshuffle(b, (2, 1, 0)))
assert numpy.allclose(numpy.swapaxes(a, 1, 2),
cuda_ndarray.dimshuffle(b, (0, 2, 1)))
assert numpy.allclose(numpy.swapaxes(a, 1, 2)[None, :, None, :, :, None],
cuda_ndarray.dimshuffle(b, (-1, 0, -1, 2, 1, -1)))
# 4d
a = theano._asarray(rng.randn(3, 11, 4, 5), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert numpy.allclose(numpy.swapaxes(a, 0, 1),
cuda_ndarray.dimshuffle(b, (1, 0, 2, 3)))
assert numpy.allclose(numpy.swapaxes(a, 0, 2),
cuda_ndarray.dimshuffle(b, (2, 1, 0, 3)))
assert numpy.allclose(numpy.swapaxes(a, 0, 3),
cuda_ndarray.dimshuffle(b, (3, 1, 2, 0)))
assert numpy.allclose(numpy.swapaxes(a, 0, 3),
cuda_ndarray.dimshuffle(b, (3, 1, 2, 0)))
assert numpy.allclose(numpy.swapaxes(a, 0, 3)[None, :, None, :, :, :],
cuda_ndarray.dimshuffle(b, (-1, 3, -1, 1, 2, 0)))
def test_dot():
# print >>sys.stdout, 'starting test_dot'
utt.seed_rng()
rng = numpy.random.RandomState(utt.fetch_seed())
a0 = theano._asarray(rng.randn(4, 7), dtype='float32')
a1 = theano._asarray(rng.randn(7, 6), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1)
assert _allclose(numpy.dot(a0, a1), cuda_ndarray.dot(b0, b1))
a1 = theano._asarray(rng.randn(6, 7), dtype='float32')
b1 = cuda_ndarray.CudaNdarray(a1)
numpy_version = numpy.dot(a0, a1.T)
transposed = cuda_ndarray.dimshuffle(b1, (1, 0))
cuda_version = cuda_ndarray.dot(b0, transposed)
assert _allclose(numpy_version, cuda_version)
a1 = theano._asarray(rng.randn(7, 6), dtype='float32')
b1 = cuda_ndarray.CudaNdarray(a1)
a0 = theano._asarray(rng.randn(7, 4), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert _allclose(numpy.dot(a0.T, a1),
cuda_ndarray.dot(
cuda_ndarray.dimshuffle(b0, (1, 0)), b1))
a1 = theano._asarray(rng.randn(6, 7), dtype='float32')
b1 = cuda_ndarray.CudaNdarray(a1)
assert _allclose(
numpy.dot(a0.T, a1.T),
cuda_ndarray.dot(cuda_ndarray.dimshuffle(b0, (1, 0)),
cuda_ndarray.dimshuffle(b1, (1, 0))))
def test_sum():
shape = (2, 3)
a0 = theano._asarray(numpy.arange(shape[0] * shape[1]).reshape(shape),
dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(),
numpy.asarray(b0.reduce_sum([1, 1])))
a0.sum(axis=0)
b0.reduce_sum([1, 0])
# print 'asum\n',a0sum
# print 'bsum\n',numpy.asarray(b0sum)
assert numpy.allclose(a0.sum(axis=0),
numpy.asarray(b0.reduce_sum([1, 0])))
assert numpy.allclose(a0.sum(axis=1),
numpy.asarray(b0.reduce_sum([0, 1])))
assert numpy.allclose(a0, numpy.asarray(b0.reduce_sum([0, 0])))
shape = (3, 4, 5, 6, 7, 8)
a0 = theano._asarray(numpy.arange(3 * 4 * 5 * 6 * 7 * 8).reshape(shape),
dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(axis=5).sum(axis=3).sum(axis=0),
numpy.asarray(b0.reduce_sum([1, 0, 0, 1, 0, 1])))
shape = (16, 2048)
a0 = theano._asarray(numpy.arange(16 * 2048).reshape(shape),
dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(axis=0), numpy.asarray(b0.reduce_sum([1, 0])))
shape = (16, 10)
a0 = theano._asarray(numpy.arange(160).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(), numpy.asarray(b0.reduce_sum([1, 1])))
def test_reshape():
shapelist = [((1, 2, 3), (1, 2, 3)),
((1,), (1,)),
((1, 2, 3), (3, 2, 1)),
((1, 2, 3), (6,)),
((1, 2, 3, 2), (6, 2)),
((2, 3, 2), (6, 2)),
((2, 3, 2), (12,))
]
bad_shapelist = [
((1, 2, 3), (1, 2, 4)),
((1,), (2,)),
((1, 2, 3), (2, 2, 1)),
((1, 2, 3), (5,)),
((1, 2, 3, 2), (6, 3)),
((2, 3, 2), (5, 2)),
((2, 3, 2), (11,))
]
utt.seed_rng()
rng = numpy.random.RandomState(utt.fetch_seed())
def subtest(shape_1, shape_2, rng):
# print >> sys.stdout, "INFO: shapes", shape_1, shape_2
a = theano._asarray(rng.randn(*shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
aa = a.reshape(shape_2)
bb = b.reshape(shape_2)
n_bb = numpy.asarray(bb)
# print n_bb
assert numpy.all(aa == n_bb)
assert aa.shape == n_bb.shape
# Test the not contiguous case
shape_1_2x = (shape_1[0] * 2,) + shape_1[1:]
a = theano._asarray(rng.randn(*shape_1_2x), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
a = a[::2]
b = b[::2]
aa = a.reshape(shape_2)
bb = b.reshape(shape_2)
n_bb = numpy.asarray(bb)
# print n_bb
assert numpy.all(aa == n_bb)
assert aa.shape == n_bb.shape
def bad_subtest(shape_1, shape_2, rng):
a = theano._asarray(rng.randn(*shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
try:
b.reshape(shape_2)
except Exception:
return
assert False
# test working shapes
for shape_1, shape_2 in shapelist:
subtest(shape_1, shape_2, rng)
subtest(shape_2, shape_1, rng)
# test shape combinations that should give error
for shape_1, shape_2 in bad_shapelist:
bad_subtest(shape_1, shape_2, rng)
bad_subtest(shape_2, shape_1, rng)
def test_getshape():
shapelist = [
((1, 2, 3), (1, 2, 3)),
((1,), (1,)),
((1, 2, 3), (3, 2, 1)),
((1, 2, 3), (6,)),
((1, 2, 3, 2), (6, 2)),
((2, 3, 2), (6, 2))
]
def subtest(shape):
        a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert b.shape == a.shape
for shape_1, shape_2 in shapelist:
subtest(shape_1)
subtest(shape_2)
def test_stride_manipulation():
a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
v = b.view()
v._dev_data += 0
c = numpy.asarray(v)
assert numpy.all(a == c)
sizeof_float = 4
offset = 0
b_strides = b._strides
for i in xrange(len(b.shape)):
offset += (b.shape[i] - 1) * b_strides[i]
v._set_stride(i, -b_strides[i])
v._dev_data += offset * sizeof_float
c = numpy.asarray(v)
assert numpy.all(c == [[5, 4, 3], [2, 1, 0]])
def test_subtensor_broadcastable():
a = numpy.zeros((2, 7), dtype='float32')
cuda_a = cuda_ndarray.CudaNdarray(a)
# Will have shape (1, 7), so the stride in the first dim should be 0
sub_a = cuda_a[1:]
assert sub_a.shape == (1, 7)
assert sub_a._strides[0] == 0
def test_copy_subtensor0():
sizeof_float = 4
a = theano._asarray(numpy.random.rand(30, 20, 5, 5), dtype='float32')
cuda_a = cuda_ndarray.CudaNdarray(a)
a_view = cuda_a.view()
a_view_strides = a_view._strides
a_view._set_stride(2, -a_view_strides[2])
a_view._set_stride(3, -a_view_strides[3])
a_view._dev_data += 24 * sizeof_float
a_view_copy = copy.deepcopy(a_view)
assert numpy.all(a[:, :, ::-1, ::-1] == numpy.asarray(a_view_copy))
def test_mapping_getitem_ellipsis():
a = theano._asarray(numpy.random.rand(5, 4, 3, 2), dtype='float32')
a = cuda_ndarray.CudaNdarray(a)
b = a[...]
assert b._dev_data == a._dev_data
assert b._strides == a._strides
assert b.shape == a.shape
def test_mapping_getitem_reverse_some_dims():
dim = (5, 4, 3, 2)
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_b = _a[:, :, ::-1, ::-1]
b = numpy.asarray(_b)
assert numpy.all(b == a[:, :, ::-1, ::-1])
def test_mapping_getitem_w_int():
def _cmp(x, y):
assert x.shape == y.shape
if not numpy.all(x == y):
print(x)
print(y)
assert numpy.all(x == y)
def _cmpf(x, *y):
try:
x.__getitem__(y)
except IndexError:
pass
else:
            raise Exception("Did not generate out of bounds error")
def _cmpfV(x, *y):
try:
if len(y) == 1:
x.__getitem__(*y)
else:
x.__getitem__(y)
except ValueError:
pass
else:
            raise Exception("Did not generate out of bounds error")
dim = (2,)
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[1]), a[1])
_cmp(numpy.asarray(_a[-1]), a[-1])
_cmp(numpy.asarray(_a[0]), a[0])
_cmp(numpy.asarray(_a[::1]), a[::1])
_cmp(numpy.asarray(_a[::-1]), a[::-1])
_cmp(numpy.asarray(_a[...]), a[...])
_cmpf(_a, 2)
dim = ()
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[...]), a[...])
_cmpf(_a, 0)
_cmpfV(_a, slice(1))
dim = (5, 4, 3, 2)
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_cmpf(_a, slice(-1), slice(-1), 10, -10)
_cmpf(_a, slice(-1), slice(-1), -10, slice(-1))
_cmpf(_a, 0, slice(0, -1, -20), -10)
_cmpf(_a, 10)
_cmpf(_a, (10, 0, 0, 0))
_cmpf(_a, -10)
# test with integer
_cmp(numpy.asarray(_a[1]), a[1])
_cmp(numpy.asarray(_a[-1]), a[-1])
_cmp(numpy.asarray(_a[numpy.int64(1)]), a[numpy.int64(1)])
_cmp(numpy.asarray(_a[numpy.int64(-1)]), a[numpy.int64(-1)])
# test with slice
_cmp(numpy.asarray(_a[1:]), a[1:])
_cmp(numpy.asarray(_a[1:2]), a[1:2])
_cmp(numpy.asarray(_a[-1:1]), a[-1:1])
# test with tuple (mix slice, integer, numpy.int64)
_cmp(numpy.asarray(_a[:, :, ::numpy.int64(-1), ::-1]), a[:, :, ::-1, ::-1])
_cmp(numpy.asarray(_a[:, :, numpy.int64(1), -1]), a[:, :, 1, -1])
_cmp(numpy.asarray(_a[:, :, ::-1, ::-1]), a[:, :, ::-1, ::-1])
_cmp(numpy.asarray(_a[:, :, ::-10, ::-10]), a[:, :, ::-10, ::-10])
_cmp(numpy.asarray(_a[:, :, 1, -1]), a[:, :, 1, -1])
_cmp(numpy.asarray(_a[:, :, -1, :]), a[:, :, -1, :])
_cmp(numpy.asarray(_a[:, ::-2, -1, :]), a[:, ::-2, -1, :])
_cmp(numpy.asarray(_a[:, ::-20, -1, :]), a[:, ::-20, -1, :])
_cmp(numpy.asarray(_a[:, ::-2, -1]), a[:, ::-2, -1])
_cmp(numpy.asarray(_a[0, ::-2, -1]), a[0, ::-2, -1])
_cmp(numpy.asarray(_a[-1, -1, -1, -2]), a[-1, -1, -1, -2])
_cmp(numpy.asarray(_a[...]), a[...])
def test_gemm_vector_vector():
a = theano._asarray(numpy.random.rand(5, 1), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray(numpy.random.rand(1, 5), dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
_c = cuda_ndarray.dot(_a, _b)
assert _c.shape == (5, 5)
assert numpy.allclose(_c, numpy.dot(a, b))
_c = cuda_ndarray.dot(_b, _a)
assert _c.shape == (1, 1)
assert numpy.allclose(_c, numpy.dot(b, a))
# ---------------------------------------------------------------------
def test_setitem_matrixscalar0():
a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray(8, dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# set an element to 8
_a[1, 1] = _b
a[1, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
    # test direct transfer from numpy
_a[1, 1] = theano._asarray(888, dtype='float32')
a[1, 1] = theano._asarray(888, dtype='float32')
assert numpy.allclose(a, numpy.asarray(_a))
# broadcast a 0
_a[1, 1] = 0
_a[0:2] = 0
_a[1:] = 0
def test_setitem_matrixvector1():
a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# set second column to 8,9
_a[:, 1] = _b
a[:, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
    # test direct transfer from numpy
_a[:, 1] = b * 100
a[:, 1] = b * 100
assert numpy.allclose(a, numpy.asarray(_a))
row = theano._asarray([777, 888, 999], dtype='float32')
_a[1, :] = row
a[1, :] = row
assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_matrix_tensor3():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# set middle row through cube to 7,8,9
_a[:, 1, 1] = _b
a[:, 1, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
    # test direct transfer from numpy
_a[:, 1, 1] = b * 100
a[:, 1, 1] = b * 100
assert numpy.allclose(a, numpy.asarray(_a))
row = theano._asarray([777, 888, 999], dtype='float32')
_a[1, 1, :] = row
a[1, 1, :] = row
assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_from_numpy_error():
pass
def test_setitem_matrix_bad_shape():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
try:
# attempt to assign the ndarray b with setitem
_a[:, 1, 1] = _b
assert False
except ValueError:
# print e
assert True
    # test direct transfer from numpy
try:
# attempt to assign the ndarray b with setitem
_a[1, 1, :] = b
assert False
except ValueError:
# print e
assert True
def test_setitem_matrix_bad_ndim():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
try:
# attempt to assign the ndarray b with setitem
_a[:, :, 1] = _b
assert False
except ValueError:
# print e
assert True
    # test direct transfer from numpy
try:
# attempt to assign the ndarray b with setitem
_a[1, :, :] = b
assert False
except ValueError:
# print e
assert True
def test_setitem_matrix_bad_type():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8], dtype='float64')
    # test direct transfer from numpy
try:
# attempt to assign the ndarray b with setitem
_a[1, :, :] = b
assert False
except TypeError:
# print e
assert True
def test_setitem_assign_to_slice():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# first get a slice of a
_c = _a[:, :, 1]
# set middle row through cube to 7,8,9
# (this corresponds to middle row of matrix _c)
_c[:, 1] = _b
a[:, :, 1][:, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
    # test direct transfer from numpy
_d = _a[1, :, :]
_d[1, :] = b * 10
a[1, :, :][1, :] = b * 10
assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_broadcast():
# test scalar to vector without stride
a = numpy.arange(3)
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray(9, dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
_a[:] = _b.reshape((1,))
a[:] = b.reshape((1,))
assert numpy.allclose(numpy.asarray(_a), a)
    # test vector to matrix without stride
a = numpy.arange(9)
a.resize((3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
_a[:, :] = _b.reshape((1, 3))
a[:, :] = b.reshape((1, 3))
assert numpy.allclose(numpy.asarray(_a), a)
    # test vector to matrix with stride
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([[7, 8, 9], [10, 11, 12]], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)[0]
b = b[0]
_a[:, :, 1] = _b.reshape((1, 3))
a[:, :, 1] = b.reshape((1, 3))
assert numpy.allclose(numpy.asarray(_a), a)
def test_setitem_broadcast_numpy():
# test scalar to vector without stride
a = numpy.arange(3)
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray(9, dtype='float32')
_a[:] = b.reshape((1,))
a[:] = b.reshape((1,))
assert numpy.allclose(numpy.asarray(_a), a)
    # test vector to matrix without stride
a = numpy.arange(9)
a.resize((3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9], dtype='float32')
_a[:, :] = b.reshape((1, 3))
a[:, :] = b.reshape((1, 3))
assert numpy.allclose(numpy.asarray(_a), a)
    # test vector to matrix with stride
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([[7, 8, 9], [10, 11, 12]], dtype='float32')
b = b[0]
_a[1, :, :] = b.reshape((1, 3))
a[1, :, :] = b.reshape((1, 3))
assert numpy.allclose(numpy.asarray(_a), a)
# this also fails for the moment
def test_setitem_rightvalue_ndarray_fails():
"""
Now we don't automatically add dimensions to broadcast
"""
a = numpy.arange(3 * 4 * 5)
a.resize((3, 4, 5))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9, 10], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
b5 = theano._asarray([7, 8, 9, 10, 11], dtype='float32')
cuda_ndarray.CudaNdarray(b)
# attempt to assign the ndarray b with setitem
_a[:, :, 1] = _b
a[:, :, 1] = b
assert numpy.allclose(numpy.asarray(_a), a)
    # test direct transfer from numpy to a contiguous region
# attempt to assign the ndarray b with setitem
# same number of dim
mat = numpy.random.rand(4, 5).astype('float32')
_a[2, :, :] = mat
a[2, :, :] = mat
assert numpy.allclose(numpy.asarray(_a), a)
# without same number of dim
try:
_a[0, :, :] = mat
# a[0, :, :] = mat
# assert numpy.allclose(numpy.asarray(_a), a)
except ValueError:
pass
    # test direct transfer from numpy with broadcast
_a[0, :, :] = b5
a[0, :, :] = b5
assert numpy.allclose(numpy.asarray(_a), a)
    # test direct transfer from numpy to a non-contiguous region
# attempt to assign the ndarray b with setitem
_a[:, :, 2] = b
a[:, :, 2] = b
assert numpy.allclose(numpy.asarray(_a), a)
def test_zeros_basic():
for shp in [(3, 4, 5), (300,), (), (0, 7)]:
_a = cuda_ndarray.CudaNdarray.zeros(shp)
_n = numpy.zeros(shp, dtype="float32")
assert numpy.allclose(numpy.asarray(_a), _n)
assert _a.shape == _n.shape
assert all(_a._strides == numpy.asarray(_n.strides) / 4)
# TODO:The following don't have the same stride!
# This should be fixed with the new GpuNdArray.
for shp in [(3, 0), (4, 1, 5)]:
_a = cuda_ndarray.CudaNdarray.zeros(shp)
_n = numpy.zeros(shp, dtype="float32")
assert numpy.allclose(numpy.asarray(_a), _n)
assert _a.shape == _n.shape
try:
_n = numpy.zeros()
except TypeError:
pass
else:
raise Exception("An error was expected!")
try:
_a = cuda_ndarray.CudaNdarray.zeros()
except TypeError:
pass
else:
raise Exception("An error was expected!")
def test_base():
# Test that the 'base' attribute of a CudaNdarray is the one
# built initially, not an intermediate one.
a = cuda_ndarray.CudaNdarray.zeros((3, 4, 5))
for i in xrange(5):
b = a[:]
assert b.base is a
c = a[0]
d = c[:, 0]
# print d.shape
assert c.base is a
assert d.base is a
e = b.reshape((5, 2, 2, 3))
assert e.base is a
def test_set_strides():
a = cuda_ndarray.CudaNdarray.zeros((5, 5))
# Test with tuple
new_strides = (a.strides[1], a.strides[0])
a.strides = new_strides
assert a.strides == new_strides
# Test with list
new_strides = (a.strides[1], a.strides[0])
a.strides = [a.strides[1], a.strides[0]]
assert a.strides == new_strides
try:
a.strides = (a.strides[1],)
assert False
except ValueError:
pass
try:
a.strides = (1, 1, 1)
assert False
except ValueError:
pass
def test_is_c_contiguous():
a = cuda_ndarray.CudaNdarray.zeros((3, 4, 5))
assert a.is_c_contiguous()
assert a[1].is_c_contiguous()
assert not a[::2].is_c_contiguous()
if __name__ == '__main__':
test_setitem_matrixvector1()
test_setitem_matrix_tensor3()
test_setitem_assign_to_slice()
test_setitem_rightvalue_ndarray_fails()
|
137265
|
from .generic_indicator import GenericIndicator
from pyti.average_true_range import average_true_range as atr
# params: period
# https://github.com/kylejusticemagnuson/pyti/blob/master/pyti/average_true_range.py
class PytiAverageTrueRange(GenericIndicator):
def __init__(self, market, interval, periods, params=None):
super().__init__(market, interval, periods, None, None, params)
def get_analysis(self, data):
return atr(data, self.params['period'])[-1]
def next_calculation(self, candle):
if self.get_datawindow() is not None:
self.value = self.get_analysis(self.get_close())
|
137277
|
import re
from collections import namedtuple
Help = namedtuple('Help', 'options')
opt_pattern = re.compile(r'((--[a-zA-Z\-]+)|(-[a-zA-Z])\b)')
def parse_help(helpstr):
# Contains options, e.g. --help, --verbose, -o
options = [match[1] for match in opt_pattern.finditer(helpstr)]
return Help(options=options)
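# Minimal usage sketch (illustrative only; the sample help text below is made up):
if __name__ == '__main__':
    sample = "usage: tool [-h] [--verbose] [-o FILE] [--dry-run]"
    print(parse_help(sample).options)  # ['-h', '--verbose', '-o', '--dry-run']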
|
137300
|
from keras_audio.library.utility.audio_utils import compute_melgram
from keras_audio.library.utility.gtzan_loader import download_gtzan_genres_if_not_found
import numpy as np
def load_audio_path_label_pairs(max_allowed_pairs=None):
download_gtzan_genres_if_not_found('../very_large_data/gtzan')
audio_paths = []
with open('../data/lists/test_songs_gtzan_list.txt', 'rt') as file:
for line in file:
audio_path = '../very_large_data/' + line.strip()
audio_paths.append(audio_path)
pairs = []
with open('../data/lists/test_gt_gtzan_list.txt', 'rt') as file:
for line in file:
label = int(line)
if max_allowed_pairs is None or len(pairs) < max_allowed_pairs:
pairs.append((audio_paths[len(pairs)], label))
else:
break
return pairs
def main():
pairs = load_audio_path_label_pairs()
for index, (audio_path, _) in enumerate(pairs):
print('{} / {} ...'.format(index+1, len(pairs)))
mg = compute_melgram(audio_path)
print('max: ', np.max(mg))
print('min: ', np.min(mg))
if __name__ == '__main__':
main()
|
137306
|
import requests
from ..utils.exceptions import BestBuyAPIError
from ..constants import (
API_SEARCH_PARAMS,
BASE_URL,
BULK_API,
STORE_SEARCH_PARAMS,
PRODUCT_SEARCH_PARAMS,
)
class BestBuyCore(object):
def __init__(self, api_key):
"""API's base class
:params:
:api_key (str): best buy developer API key.
"""
self.api_key = api_key.strip()
def _call(self, payload):
"""
        Actual call to the Best Buy API.
:rType:
- JSON
- Text/String
"""
valid_payload = self._validate_params(payload)
url, valid_payload = self._build_url(valid_payload)
request = requests.get(url, params=valid_payload)
if "json" in request.headers["Content-Type"]:
return request.json()
return request.content
def _api_name(self):
return None
def _build_url(self, payload):
"""
        Receives a payload (dict) with the necessary params to make a call
        to the Best Buy API and returns a string URL that includes the
        query and the dict parameters pre-processed for an API call to be
        made.
        :param payload: dictionary with request parameters
        :rType: tuple that contains the url that includes the query and
                the parameters pre-processed for an API call to be made.
"""
query = payload["query"]
        # Pre-process parameters before submitting the payload.
out = dict()
for key, value in payload["params"].items():
if isinstance(value, list):
out[key] = ",".join(value)
else:
out[key] = value
# Add key to params
out["apiKey"] = self.api_key
if self._api_name() == BULK_API:
url = BASE_URL + f"{query}"
else:
url = BASE_URL + f"{self._api_name()}({query})"
return (url, out)
def _validate_params(self, payload):
"""
Validate parameters, double check that there are no None values
in the keys.
:param payload: dictionary, with the parameters to be used to make
a request.
"""
for key, value in payload["params"].items():
            # TODO: Use a class variable to load the appropriate validation list of params
VALID_PARAMS = API_SEARCH_PARAMS + STORE_SEARCH_PARAMS + PRODUCT_SEARCH_PARAMS
if key not in VALID_PARAMS:
err_msg = "{0} is an invalid Product" " Search Parameter".format(key)
raise BestBuyAPIError(err_msg)
if value is None:
err_msg = "Key {0} can't have None for a value".format(key)
raise BestBuyAPIError(err_msg)
return payload
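# Minimal usage sketch (illustrative only): BestBuyCore is meant to be subclassed
# so that _api_name() returns a concrete endpoint name. The API key, query and
# parameter names below are placeholders, not guaranteed valid values.
#
#     api = ProductsAPI("YOUR_API_KEY")   # hypothetical subclass
#     payload = {
#         "query": "manufacturer=canon&salePrice<1000",
#         "params": {"format": "json", "show": ["sku", "name", "salePrice"]},
#     }
#     results = api._call(payload)        # dict when the API returns JSON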
|
137326
|
from brownie import *
from .settings import *
from .contracts import *
from .contract_addresses import *
import time
def main():
load_accounts()
# Initialise Project
operator = accounts[0]
wallet = accounts[1]
# GP: Split into public and miso access control
access_control = deploy_access_control(operator)
user_access_control = deploy_user_access_control(operator)
# user_access_control = access_control
# Setup MISOTokenFactory
miso_token_factory = deploy_miso_token_factory(access_control)
mintable_token_template = deploy_mintable_token_template()
if miso_token_factory.tokenTemplateId() == 0:
miso_token_factory.addTokenTemplate(
mintable_token_template, {'from': operator})
# Setup MISO Market
bento_box = deploy_bento_box()
crowdsale_template = deploy_crowdsale_template()
dutch_auction_template = deploy_dutch_auction_template()
miso_market = deploy_miso_market(
access_control, [dutch_auction_template, crowdsale_template])
uniswap_factory = deploy_uniswap_factory()
# MISOLauncher
weth_token = deploy_weth_token()
pool_liquidity_template = deploy_pool_liquidity_template()
miso_launcher = deploy_miso_launcher(access_control, weth_token, bento_box)
if miso_launcher.getLiquidityTemplateIndex(0) == ZERO_ADDRESS:
miso_launcher.addLiquidityLauncherTemplate(
pool_liquidity_template, {"from": accounts[0]})
# MISOFarmFactory
masterchef_template = deploy_masterchef_template()
farm_factory = deploy_farm_factory(access_control)
if farm_factory.farmTemplateId() == 0:
farm_factory.addFarmTemplate(
masterchef_template, {"from": accounts[0]})
# Create mintable for testing
recipe_02 = MISORecipe02.deploy(
miso_token_factory,
weth_token,
miso_market,
miso_launcher,
uniswap_factory,
farm_factory,
{"from": accounts[0]}
)
# recipe_02_address = web3.toChecksumAddress(0x3FD2f53bA85345E17aF41e845f1c41014962db5F)
# recipe_02 = MISORecipe02.at(recipe_02_address)
# Access control admin must set the smart contract roles
# user_access_control.addSmartContractRole(recipe_02, {'from': accounts[0]})
name = "Token"
symbol = "TKN"
tokensToMint = 1000 * TENPOW18
tokensToMarket = 200 * TENPOW18
paymentCurrency = ETH_ADDRESS
startTime = chain.time() + 50
endTime = chain.time() + 1000
market_rate = 100
market_goal = 200
launchwindow = 3 * 24 * 60 * 60
deadline = 200
locktime = 100
tokensToLiquidity = 100 * TENPOW18
# Create new Farm
rewards_per_block = 1 * TENPOW18
# Define the start time relative to sales
start_block = len(chain) + 10
dev_addr = wallet
tokensToFarm = 100 * TENPOW18
alloc_point = 10
integratorFeeAccount = accounts[1]
tx = recipe_02.prepareMiso(
name,
symbol,
user_access_control,
tokensToMint,
tokensToMarket,
paymentCurrency,
startTime,
endTime,
market_rate,
market_goal,
wallet,
operator,
deadline,
launchwindow,
locktime,
tokensToLiquidity,
rewards_per_block,
start_block,
dev_addr,
tokensToFarm,
alloc_point,
integratorFeeAccount, {'from': accounts[0]}
)
time.sleep(1)
print("tx events: " + str(tx.events))
|
137372
|
from .abstract import AbstractHealthcheck
class DummyHealthcheck(AbstractHealthcheck):
status = True
def __init__(self, config=None):
pass
async def is_healthy(self, vm):
return self.status
def set_status(self, healthy):
self.status = healthy
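# Minimal usage sketch (illustrative only; meant to run inside the package because
# of the relative import above, and assumes AbstractHealthcheck declares no other
# abstract methods):
#
#     hc = DummyHealthcheck()
#     await hc.is_healthy(vm)   # True by default
#     hc.set_status(False)
#     await hc.is_healthy(vm)   # now False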
|
137375
|
from .xframeoptionsdirective import XFrameOptionsDirective
from .xframeoptions import XFrameOptions
__all__ = ['XFrameOptionsDirective','XFrameOptions']
|
137377
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.unittest import TestCase
class ValidationMessagesTest(TestCase):
def test_autofield_field_raises_error_message(self):
f = models.AutoField(primary_key=True)
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [u"'foo' value must be an integer."])
# primary_key must be True. Refs #12467.
self.assertRaises(AssertionError, models.AutoField, 'primary_key', False)
try:
models.AutoField(primary_key=False)
except AssertionError, e:
self.assertEqual(str(e), "AutoFields must have primary_key=True.")
def test_integer_field_raises_error_message(self):
f = models.IntegerField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [u"'foo' value must be an integer."])
def test_boolean_field_raises_error_message(self):
f = models.BooleanField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages,
[u"'foo' value must be either True or False."])
def test_float_field_raises_error_message(self):
f = models.FloatField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [u"'foo' value must be a float."])
def test_decimal_field_raises_error_message(self):
f = models.DecimalField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages,
[u"'foo' value must be a decimal number."])
def test_null_boolean_field_raises_error_message(self):
f = models.NullBooleanField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages,
[u"'foo' value must be either None, True or False."])
def test_date_field_raises_error_message(self):
f = models.DateField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'foo' value has an invalid date format. "
u"It must be in YYYY-MM-DD format."])
self.assertRaises(ValidationError, f.clean, 'aaaa-10-10', None)
try:
f.clean('aaaa-10-10', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'aaaa-10-10' value has an invalid date format. "
u"It must be in YYYY-MM-DD format."])
self.assertRaises(ValidationError, f.clean, '2011-13-10', None)
try:
f.clean('2011-13-10', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-13-10' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."])
self.assertRaises(ValidationError, f.clean, '2011-10-32', None)
try:
f.clean('2011-10-32', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-10-32' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."])
def test_datetime_field_raises_error_message(self):
f = models.DateTimeField()
# Wrong format
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'foo' value has an invalid format. It must be "
u"in YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."])
# Correct format but invalid date
self.assertRaises(ValidationError, f.clean, '2011-10-32', None)
try:
f.clean('2011-10-32', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-10-32' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."])
# Correct format but invalid date/time
self.assertRaises(ValidationError, f.clean, '2011-10-32 10:10', None)
try:
f.clean('2011-10-32 10:10', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-10-32 10:10' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."])
def test_time_field_raises_error_message(self):
f = models.TimeField()
# Wrong format
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'foo' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."])
# Correct format but invalid time
self.assertRaises(ValidationError, f.clean, '25:50', None)
try:
f.clean('25:50', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'25:50' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."])
|
137384
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def vis_causal_net(adata, key='RDI', layout = 'circular', top_n_edges = 10, edge_color = 'gray', figsize=(6, 6)):
"""Visualize inferred causal regulatory network
This plotting function visualize the inferred causal regulatory network inferred from Scribe.
Arguments
---------
adata: `Anndata`
Annotated Data Frame, an Anndata object.
key: `str` (default: `RDI`)
The key points to the type of causal network to be used for network visualization.
layout: `str` (Default: circular)
        A string that determines the graph layout function used by networkx. Currently supported layouts include
circular, kamada_kawai, planar, random, spectral, spring and shell.
top_n_edges: 'int' (default 10)
        Number of top (strongest) causal regulations to visualize.
edge_color: `str` (Default: gray)
The color for the graph edge.
figsize: `tuple` (Default: (6, 6))
The tuple of the figure width and height.
Returns
-------
A figure created by nx.draw and matplotlib.
"""
if 'causal_net' not in adata.uns.keys():
        raise ValueError('causal_net is not a key in the uns slot. Please first run causal network inference with Scribe.')
df_mat = adata.uns['causal_net'][key]
    tmp = np.where(df_mat.values - df_mat.T.values < 0)
for i in range(len(tmp[0])):
df_mat.iloc[tmp[0][i], tmp[1][i]] = np.nan
df_mat = df_mat.stack().reset_index()
df_mat.columns = ['source', 'target', 'weight']
if top_n_edges is not None:
ind_vec = np.argsort(-df_mat.loc[:, 'weight'])
df_mat = df_mat.loc[ind_vec[:top_n_edges], :]
G = nx.from_pandas_edgelist(df_mat, source='source', target='target', edge_attr='weight', create_using=nx.DiGraph())
G.nodes()
W = []
for n, nbrs in G.adj.items():
for nbr, eattr in nbrs.items():
W.append(eattr['weight'])
options = {
'width': 300,
'arrowstyle': '-|>',
'arrowsize': 1000,
}
plt.figure(figsize=figsize)
if layout is None:
nx.draw(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "circular":
nx.draw_circular(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "kamada_kawai":
nx.draw_kamada_kawai(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "planar":
nx.draw_planar(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "random":
nx.draw_random(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "spectral":
nx.draw_spectral(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "spring":
nx.draw_spring(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
    elif layout == "shell":
nx.draw_shell(G, with_labels=True, node_color='skyblue', node_size=100, edge_color=edge_color, width=W / np.max(W) * 5, edge_cmap=plt.cm.Blues, options = options)
else:
        raise ValueError('layout {} is not supported.'.format(layout))
plt.show()
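# Minimal usage sketch (illustrative only; assumes `adata` is an AnnData object
# whose .uns['causal_net'] entry was already populated by Scribe's network
# inference):
#
#     vis_causal_net(adata, key='RDI', layout='circular', top_n_edges=20)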
|
137388
|
from collections import namedtuple
import numpy as np
import pytest
from numpy.testing import assert_allclose
from pytest_lazyfixture import lazy_fixture
from emukit.quadrature.methods.warpings import IdentityWarping, SquareRootWarping
def create_fixture_parameters():
return [pytest.param(lazy_fixture(warping.name), id=warping.name) for warping in warpings]
@pytest.fixture
def identity_warping():
return IdentityWarping()
@pytest.fixture
def squarerroot_warping():
offset = 1.0
return SquareRootWarping(offset=offset)
@pytest.fixture
def inverted_squarerroot_warping():
offset = 1.0
return SquareRootWarping(offset=offset, is_inverted=True)
warpings_tuple = namedtuple("WarpingTest", ["name"])
warpings = [
warpings_tuple("identity_warping"),
warpings_tuple("squarerroot_warping"),
warpings_tuple("inverted_squarerroot_warping"),
]
RTOL = 1e-8
ATOL = 1e-6
@pytest.mark.parametrize("warping", create_fixture_parameters())
def test_warping_shapes(warping):
Y = np.ones([5, 1])
assert warping.transform(Y).shape == Y.shape
assert warping.inverse_transform(Y).shape == Y.shape
@pytest.mark.parametrize("warping", create_fixture_parameters())
def test_warping_values(warping):
np.random.seed(42)
Y = np.random.rand(5, 1)
assert_allclose(warping.inverse_transform(warping.transform(Y)), Y, rtol=RTOL, atol=ATOL)
def test_squarerroot_warping_update_parameters(squarerroot_warping, inverted_squarerroot_warping):
new_offset = 10.0
squarerroot_warping.update_parameters(offset=new_offset)
assert squarerroot_warping.offset == new_offset
inverted_squarerroot_warping.update_parameters(offset=new_offset)
assert inverted_squarerroot_warping.offset == new_offset
def test_squarerroot_warping_inverted_flag(squarerroot_warping, inverted_squarerroot_warping):
assert not squarerroot_warping.is_inverted
assert inverted_squarerroot_warping.is_inverted
|
137404
|
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
def hello_world(request):
return Response('Hello World!')
if __name__ == '__main__':
with Configurator() as config:
config.add_route('hello', '/')
config.add_view(hello_world, route_name='hello')
app = config.make_wsgi_app()
print('server is running')
server = make_server('0.0.0.0', 8099, app)
server.serve_forever()
|
137411
|
import copy
import os
import tempfile
import tarfile
import pytest
import torch
from allennlp.version import _MAJOR, _MINOR
from allennlp.commands.train import train_model
from allennlp.common import Params
from allennlp.common.meta import Meta
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers import DatasetReader
from allennlp.models.archival import (
archive_model,
load_archive,
CONFIG_NAME,
_check_version_compatibility,
)
def assert_models_equal(model, model2):
# check that model weights are the same
keys = set(model.state_dict().keys())
keys2 = set(model2.state_dict().keys())
assert keys == keys2
for key in keys:
assert torch.equal(model.state_dict()[key], model2.state_dict()[key])
# check that vocabularies are the same
vocab = model.vocab
vocab2 = model2.vocab
assert vocab._token_to_index == vocab2._token_to_index
assert vocab._index_to_token == vocab2._index_to_token
def _test_check_version_compatibility():
meta = Meta(version=f"{_MAJOR}.{int(_MINOR) + 1}.0")
with pytest.warns(UserWarning, match="trained on a newer version"):
_check_version_compatibility("model.tar.gz", meta)
meta = Meta(version="1.2.0")
with pytest.warns(UserWarning, match="trained on version"):
_check_version_compatibility("model.tar.gz", meta)
class ArchivalTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam", "cuda_device": -1},
}
)
def test_archiving(self):
# copy params, since they'll get consumed during training
params_copy = self.params.duplicate()
params_dict_copy = copy.deepcopy(self.params.as_dict())
# `train_model` should create an archive
serialization_dir = self.TEST_DIR / "archive_test"
model = train_model(self.params, serialization_dir=serialization_dir)
archive_path = serialization_dir / "model.tar.gz"
# load from the archive
archive = load_archive(archive_path)
model2 = archive.model
assert_models_equal(model, model2)
assert isinstance(
archive.dataset_reader,
type(DatasetReader.from_params(params_copy["dataset_reader"].duplicate())),
)
assert isinstance(
archive.validation_dataset_reader,
type(DatasetReader.from_params(params_copy["dataset_reader"].duplicate())),
) # validation_dataset_reader is not in the config, so fall back to dataset_reader
# check that params are the same
params2 = archive.config
assert params2.as_dict() == params_dict_copy
def test_archive_model_uses_archive_path(self):
serialization_dir = self.TEST_DIR / "serialization"
# Train a model
train_model(self.params, serialization_dir=serialization_dir)
# Use a new path.
archive_model(
serialization_dir=serialization_dir, archive_path=serialization_dir / "new_path.tar.gz"
)
archive = load_archive(serialization_dir / "new_path.tar.gz")
assert archive
def test_loading_serialization_directory(self):
# copy params, since they'll get consumed during training
params_dict_copy = copy.deepcopy(self.params.as_dict())
# `train_model` should create an archive
serialization_dir = self.TEST_DIR / "serialization"
model = train_model(self.params, serialization_dir=serialization_dir)
# load from the serialization directory itself
archive = load_archive(serialization_dir)
model2 = archive.model
assert_models_equal(model, model2)
# check that params are the same
params2 = archive.config
assert params2.as_dict() == params_dict_copy
def test_can_load_from_archive_model(self):
serialization_dir = self.FIXTURES_ROOT / "basic_classifier" / "from_archive_serialization"
archive_path = serialization_dir / "model.tar.gz"
model = load_archive(archive_path).model
# We want to be sure that we don't just not crash, but also be sure that we loaded the right
# weights for the model. We'll do that by making sure that we didn't just load the model
# that's in the `archive_path` of the config file, which is this one.
base_model_path = self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
base_model = load_archive(base_model_path).model
base_model_params = dict(base_model.named_parameters())
for name, parameters in model.named_parameters():
if parameters.size() == base_model_params[name].size():
assert not (parameters == base_model_params[name]).all()
else:
# In this case, the parameters are definitely different, no need for the above
# check.
pass
def test_include_in_archive(self):
self.params["include_in_archive"] = ["metrics_epoch_*.json"]
serialization_dir = self.TEST_DIR / "serialization"
# Train a model
train_model(self.params, serialization_dir=serialization_dir)
# Assert that the additional targets were archived
with tempfile.TemporaryDirectory() as tempdir:
with tarfile.open(serialization_dir / "model.tar.gz", "r:gz") as archive:
archive.extractall(tempdir)
assert os.path.isfile(os.path.join(tempdir, "metrics_epoch_0.json"))
assert os.path.isfile(os.path.join(tempdir, "metrics_epoch_1.json"))
assert not os.path.isfile(os.path.join(tempdir, "metrics.json"))
def test_invalid_include_in_archive(self):
self.params["include_in_archive"] = [CONFIG_NAME]
serialization_dir = self.TEST_DIR / "serialization"
with pytest.raises(ConfigurationError) as exc:
train_model(self.params, serialization_dir=serialization_dir)
assert "are saved names and cannot be used" in str(exc.value)
|
137454
|
import unittest
import hcl2
from checkov.terraform.checks.resource.linode.user_email_set import check
from checkov.common.models.enums import CheckResult
class Testuser_email_set(unittest.TestCase):
def test_success(self):
hcl_res = hcl2.loads("""
resource "linode_user" "test" {
email="<EMAIL>"
}
""")
resource_conf = hcl_res['resource'][0]['linode_user']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_failure(self):
hcl_res = hcl2.loads("""
resource "linode_user" "test" {
}
""")
resource_conf = hcl_res['resource'][0]['linode_user']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
if __name__ == '__main__':
unittest.main()
|
137471
|
import asyncio
import functools
import secrets
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import TimeoutError as FutureTimeoutError
from logging import Logger, getLogger
from typing import TYPE_CHECKING, Any, Dict, Tuple
from urllib.parse import urlparse
from broadcaster._backends.base import BroadcastBackend
from google.cloud import pubsub_v1
from google.cloud.pubsub_v1.types import PullResponse, ReceivedMessage
from asyncapi import (
GCloudPubSubConsumerDisconnectError,
GCloudPubSubPublishTimeoutError,
)
from .. import Event
if TYPE_CHECKING:
AsyncFutureHint = asyncio.Future[PullResponse]
else:
AsyncFutureHint = asyncio.Future
class GCloudPubSubBackend(BroadcastBackend):
def __init__(
self,
url: str,
bindings: Dict[str, str] = {},
logger: Logger = getLogger(__name__),
):
url_parsed = urlparse(url, scheme='gcloud-pubsub')
self._project = url_parsed.netloc
self._consumer_channels: Dict[str, str] = {}
self._producer_channels: Dict[str, str] = {}
self._channel_index = 0
self._logger = logger
self._set_consumer_config(bindings)
self._disconnected = True
self._executor = ThreadPoolExecutor(self._consumer_max_workers)
async def connect(self) -> None:
self._producer = pubsub_v1.PublisherClient()
self._consumer = pubsub_v1.SubscriberClient()
self._disconnected = False
async def disconnect(self) -> None:
self._disconnected = True
self._producer.stop()
self._consumer.close()
del self._producer
del self._consumer
async def subscribe(self, channel: str) -> None:
pubsub_channel = self._consumer.subscription_path(
self._project, channel
)
self._consumer_channels[channel] = pubsub_channel
async def unsubscribe(self, channel: str) -> None:
self._consumer_channels.pop(channel)
async def publish(
self, channel: str, message: Any, retries_counter: int = 1
) -> None:
producer_channel = self._producer_channels.get(channel)
if not producer_channel:
producer_channel = self._producer.topic_path(
self._project, channel
)
self._producer_channels[channel] = producer_channel
future = self._producer.publish(producer_channel, message.encode())
try:
future.result(timeout=self._publish_timeout)
except FutureTimeoutError:
if retries_counter >= self._publish_retries:
raise GCloudPubSubPublishTimeoutError(
f'publish timeout; channel={channel}; '
f'message={message[:100]}...'
)
else:
await self.publish(channel, message, retries_counter + 1)
async def next_published(self) -> Event:
(
received_message,
channel_id,
pubsub_channel,
) = await self._pull_message_from_consumer()
event = Event(channel_id, received_message.message.data.decode())
if self._consumer_ack_messages:
await self.wait_ack(received_message, pubsub_channel)
else:
event.context['ack_func'] = functools.partial(
self.wait_ack, received_message, pubsub_channel,
)
return event
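    # Round-robin over the subscribed channels, pulling at most one message per
    # blocking pull() (run in the executor so the event loop stays responsive);
    # a pull timeout or an empty response advances to the next channel after a
    # short wait, and a disconnect raises GCloudPubSubConsumerDisconnectError.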
async def _pull_message_from_consumer(
self,
) -> Tuple[ReceivedMessage, str, str]:
channel_index = (
secrets.choice(range(len(self._consumer_channels)))
if self._consumer_channels
else 0
)
while not self._disconnected:
channels = list(self._consumer_channels.items())
if not len(channels):
await asyncio.sleep(self._consumer_wait_time)
continue
if channel_index >= len(channels):
channel_index = 0
channel_id, pubsub_channel = channels[channel_index]
pull_message_future: AsyncFutureHint
pull_message_future = asyncio.get_running_loop().run_in_executor( # type: ignore
self._executor,
functools.partial(
self._consumer.pull,
pubsub_channel,
max_messages=1,
return_immediately=True,
),
)
try:
response = await asyncio.wait_for(
pull_message_future, self._consumer_pull_message_timeout
)
except asyncio.TimeoutError:
channel_index += 1
await asyncio.sleep(self._consumer_wait_time)
continue
else:
if not response.received_messages:
channel_index += 1
await asyncio.sleep(self._consumer_wait_time)
continue
await asyncio.sleep(self._pull_message_wait_time)
return (
response.received_messages[0],
channel_id,
pubsub_channel,
)
raise GCloudPubSubConsumerDisconnectError()
async def wait_ack(
self,
message: ReceivedMessage,
pubsub_channel: str,
retries_counter: int = 1,
) -> None:
future = asyncio.get_running_loop().run_in_executor(
self._executor,
functools.partial(
self._consumer.acknowledge, pubsub_channel, [message.ack_id],
),
)
try:
await asyncio.wait_for(future, timeout=self._consumer_ack_timeout)
except asyncio.TimeoutError:
if retries_counter >= self._consumer_ack_retries:
self._logger.warning(
f'ack timeout {self._consumer_ack_timeout}; '
f'message={message.message.data.decode()[:100]}...'
)
else:
await self.wait_ack(
message, pubsub_channel, retries_counter + 1
)
except asyncio.CancelledError:
self._logger.warning(
'ack cancelled; '
f'message={message.message.data.decode()[:100]}...'
)
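    # Read backend tuning parameters from the AsyncAPI channel bindings,
    # keeping the defaults below for any key that is absent.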
def _set_consumer_config(self, bindings: Dict[str, str]) -> None:
consumer_wait_time = 1.0
consumer_ack_messages = False
consumer_ack_timeout = 1.0
consumer_ack_retries = 3
consumer_pull_message_timeout = 1.0
consumer_max_workers = 10
publish_timeout = 5.0
publish_retries = 3
pull_message_wait_time = 0.1
for config_name, config_value in bindings.items():
if config_name == 'consumer_wait_time':
consumer_wait_time = float(config_value)
elif config_name == 'consumer_ack_messages':
consumer_ack_messages = config_value in (
'1',
'true',
't',
'True',
'y',
'yes',
)
elif config_name == 'consumer_ack_timeout':
consumer_ack_timeout = float(config_value)
elif config_name == 'consumer_ack_retries':
consumer_ack_retries = int(config_value)
elif config_name == 'consumer_max_workers':
consumer_max_workers = int(config_value)
elif config_name == 'consumer_pull_message_timeout':
consumer_pull_message_timeout = float(config_value)
elif config_name == 'publish_timeout':
publish_timeout = float(config_value)
elif config_name == 'publish_retries':
publish_retries = int(config_value)
elif config_name == 'pull_message_wait_time':
pull_message_wait_time = float(config_value)
self._consumer_wait_time = consumer_wait_time
self._consumer_ack_messages = consumer_ack_messages
self._consumer_ack_timeout = consumer_ack_timeout
self._consumer_ack_retries = consumer_ack_retries
self._consumer_pull_message_timeout = consumer_pull_message_timeout
self._consumer_max_workers = consumer_max_workers
self._publish_timeout = publish_timeout
self._publish_retries = publish_retries
self._pull_message_wait_time = pull_message_wait_time
|
137514
|
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pykitti
import torch
import torchvision.transforms.functional as F
from torch import hub
from torchvision.datasets import Cityscapes
from autolabeling import autolabel
from autolabeling.classes import get_lidar_colormap
from lilanet.utils import colorize_seg
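# Demo script: run a Cityscapes-pretrained segmentation network on a KITTI raw
# camera frame, transfer the predicted labels to the LiDAR point cloud via
# pinhole projection, and visualise the projected points together with the
# spherical-projection channels (distance, reflectivity, label).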
def get_cityscapes_colormap():
cmap = torch.zeros([256, 3], dtype=torch.uint8)
for cls in Cityscapes.classes:
cmap[cls.id, :] = torch.tensor(cls.color)
return cmap
def convert_train_id_to_id(target):
target_copy = target.clone()
for cls in Cityscapes.classes:
target_copy[target == cls.train_id] = cls.id
return target_copy
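# Draw each LiDAR point onto the camera image, coloured by the semantic class
# of the pixel it projects to.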
def show_lidar_on_image(points, image, segmentation, T_cam0, K_cam0):
points_2d = autolabel.pinhole_projection(points, T_cam0, K_cam0)
cmap = get_cityscapes_colormap()
segmentation = convert_train_id_to_id(segmentation)
vis = colorize_seg(segmentation.cpu(), cmap)
height, width = segmentation.shape
for i in range(points.shape[0]):
        # Clip to image bounds and cast to int so the values can be used both
        # as tensor indices and as cv2.circle pixel coordinates.
        img_x = int(np.clip(points_2d[i, 0], 0, width - 1))
        img_y = int(np.clip(points_2d[i, 1], 0, height - 1))
color = vis[:, img_y, img_x].tolist()
cv2.circle(image, (img_x, img_y), 2, color=tuple(color), thickness=-1)
return image
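# Alternative visualisation: colour the projected LiDAR points by their
# Euclidean distance using an HSV colormap.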
def show_lidar_depth_on_image(pc_velo, img, T_cam0, K_cam0):
points_2d = autolabel.pinhole_projection(pc_velo, T_cam0, K_cam0)
cmap = plt.cm.get_cmap('hsv', 256)
cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255
for i in range(pc_velo.shape[0]):
depth = np.sqrt(pc_velo[i, 0] ** 2 + pc_velo[i, 1] ** 2 + pc_velo[i, 2] ** 2)
idx = np.clip(int(640.0 / depth), 0, 255)
color = cmap[idx, :]
        # cv2.circle expects integer pixel coordinates.
        img_x = int(points_2d[i, 0])
        img_y = int(points_2d[i, 1])
cv2.circle(img, (img_x, img_y), 2, color=tuple(color), thickness=-1)
return img
def plot_images(file_name, distance, reflectivity, label, segmentation, img, proj_img):
cmap = get_lidar_colormap()
cs_cmap = get_cityscapes_colormap()
def _normalize(x):
return (x - x.min()) / (x.max() - x.min())
distance_map = F.to_pil_image(_normalize(distance.squeeze()))
reflectivity_map = F.to_pil_image(_normalize(reflectivity.squeeze()))
label_map = F.to_pil_image(colorize_seg(label.squeeze(), cmap).cpu())
segmentation = convert_train_id_to_id(segmentation)
segmentation_map = F.to_pil_image(colorize_seg(segmentation.squeeze(), cs_cmap).cpu())
fig = plt.figure(figsize=(10, 5))
plt.subplot(231)
plt.title("Camera Image")
plt.imshow(img)
plt.subplot(232)
plt.title("Semantic Image")
plt.imshow(segmentation_map)
plt.subplot(233)
plt.title("Semantic Transfer")
plt.imshow(proj_img)
plt.subplot(234)
plt.title("Distance")
plt.imshow(distance_map)
plt.subplot(235)
plt.title("Reflectivity")
plt.imshow(reflectivity_map)
plt.subplot(236)
plt.title("Label")
plt.imshow(label_map)
plt.tight_layout()
plt.show()
fig.savefig(file_name, dpi=200)
if __name__ == '__main__':
torch.cuda.empty_cache()
basedir = '../data/kitti_raw'
date = '2011_09_26'
drive = '0005'
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
dataset = pykitti.raw(basedir, date, drive)
idx = 16
file_name = "{}_{}_{}.png".format(date, drive, os.path.basename(dataset.cam2_files[idx])[:-4])
model = hub.load('TheCodez/pytorch-GoogLeNet-FCN', 'googlenet_fcn', pretrained='cityscapes')
model = model.to(device)
model.eval()
img = dataset.get_cam2(idx)
pc_velo = dataset.get_velo(idx)
print("Inference")
pred = autolabel.semantic_segmentation(model, img, device)
pc_velo = autolabel.get_points_in_fov_90(pc_velo)
print("Transferring labels")
pc_labels = autolabel.transfer_labels(pc_velo, pred, dataset.calib.T_cam0_velo, dataset.calib.K_cam0)
print("Spherical projection")
lidar = autolabel.spherical_projection(pc_labels)
proj_img = show_lidar_on_image(pc_velo, np.array(img), pred, dataset.calib.T_cam0_velo, dataset.calib.K_cam0)
record = torch.as_tensor(lidar, dtype=torch.float32).permute(2, 0, 1).contiguous()
reflectivity = record[3, :, :]
distance = record[4, :, :]
label = record[5, :, :]
plot_images(file_name, distance, reflectivity, label, pred, img, proj_img)
|
137519
|
import arrayfire as af
import typing as tp
from ._array import ndarray, _wrap_af_array
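# NumPy-like wrappers around ArrayFire routines (counting, differencing,
# sorting by key, set operations) that operate on this package's ndarray type.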
def count_nonzero(a: ndarray,
axis: tp.Optional[int] = None) \
-> tp.Union[int, ndarray]:
return _wrap_af_array(af.count(a._af_array, dim=axis))
def diff(a: ndarray,
n: int = 1,
axis: int = -1) -> ndarray:
"""Calculate the n-th discrete difference along given axis."""
if axis == -1:
# use last axis
axis = a.ndim - 1
if 0 <= axis <= 3:
if axis >= a.ndim:
raise ValueError("axis exceeds array dimension")
if n >= a.shape[axis]:
raise ValueError(f"input array has length {a.shape[axis]} in "
f"dimension {axis} and therefore cannot be "
f"differentiated more than {a.shape[axis] - 1} "
f"times")
if n == 0:
return a.copy()
elif n == 1:
new_array = af.diff1(a._af_array, dim=axis)
elif n == 2:
new_array = af.diff2(a._af_array, dim=axis)
elif n > 2:
output = a
while n >= 2:
n -= 2
output = ndarray(af.diff2(output._af_array, dim=axis))
if n == 1:
output = ndarray(af.diff1(output._af_array, dim=axis))
return output
else:
raise ValueError(f"n must be positive but is {n}")
else:
raise ValueError("Axis must be between 0 and 3")
return ndarray(new_array)
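# A minimal usage sketch (the import path of this module is an assumption;
# `a` is any ndarray wrapping an ArrayFire array):
#
#     d1 = diff(a)          # first differences along the last axis
#     d3 = diff(a, n=3)     # computed as diff2 followed by diff1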
def flatnonzero(a: ndarray) -> ndarray:
return ndarray(af.where(a._af_array))
def sort_by_keys(keys: ndarray,
values: ndarray,
axis: int = -1,
ascending: bool = True) -> tp.Tuple[ndarray, ndarray]:
if keys.shape != values.shape:
raise ValueError("Keys and values must have the same dimensions.")
elif axis is None:
keys = keys.flatten()
values = values.flatten()
elif axis == -1:
axis = keys.ndim - 1
elif axis >= keys.ndim:
raise ValueError(f"Parameter axis must be between -1 and "
f"{keys.ndim - 1}")
ordered_values, ordered_keys \
= af.sort_by_key(values._af_array,
keys._af_array,
is_ascending=ascending)
return ndarray(ordered_keys), ndarray(ordered_values)
def unique(ar: ndarray,
return_index: bool = False,
return_inverse: bool = False,
return_counts: bool = False) -> ndarray:
if return_index:
raise ValueError("return_index=True is not supported")
if return_inverse:
raise ValueError("return_inverse=True is not supported")
if return_counts:
raise ValueError("return_counts=True is not supported")
unsorted_unique_set_af_array = af.set_unique(ar._af_array,
is_sorted=False)
sorted_unique_set_af_array = af.sort(unsorted_unique_set_af_array,
dim=0,
is_ascending=True)
return ndarray(sorted_unique_set_af_array)
def union1d(ar1: ndarray, ar2: ndarray) -> ndarray:
new_af_array = af.set_union(ar1._af_array,
ar2._af_array,
is_unique=False)
return ndarray(new_af_array)
def intersect1d(ar1: ndarray, ar2: ndarray) -> ndarray:
new_af_array = af.set_intersect(ar1._af_array,
ar2._af_array,
is_unique=False)
return ndarray(new_af_array)
|
137520
|
from unittest import TestCase
class TestDukeMTMC(TestCase):
def test_all(self):
import os.path as osp
from reid.datasets import DukeMTMC
from reid.utils.serialization import read_json
root, split_id, num_val = '/tmp/open-reid/dukemtmc', 0, 100
dataset = DukeMTMC(root, split_id=split_id, num_val=num_val,
download=True)
self.assertTrue(osp.isfile(osp.join(root, 'meta.json')))
self.assertTrue(osp.isfile(osp.join(root, 'splits.json')))
meta = read_json(osp.join(root, 'meta.json'))
        self.assertEqual(len(meta['identities']), 1812)
        splits = read_json(osp.join(root, 'splits.json'))
        self.assertEqual(len(splits), 1)
self.assertDictEqual(meta, dataset.meta)
self.assertDictEqual(splits[split_id], dataset.split)
|
137539
|
from __future__ import absolute_import, division, print_function
import cctbx.xray.targets
from cctbx.array_family import flex
from libtbx.test_utils import approx_equal
from six.moves import range
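# Finite-difference regression test for cctbx.xray.targets.shelxl_wght_ls: the
# pure-Python calc_k / calc_w / calc_t reference below reproduces the SHELXL
# WGHT least-squares target, and exercise() checks analytical gradients and
# curvatures against central differences.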
def calc_k(f_obs, i_calc):
fc = flex.sqrt(i_calc)
num = flex.sum(f_obs * fc)
den = flex.sum(fc * fc)
assert den != 0
k = num / den
return k
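# SHELXL-style weights: w = 1 / (sigma_k^2 + (wa*P)^2 + wb*P) with
# P = (max(I_obs / k^2, 0) + 2 * I_calc) / 3.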
def calc_w(wa, wb, i_obs, i_sig, i_calc, k):
assert i_sig.size() == i_obs.size()
assert i_calc.size() == i_obs.size()
ik = i_obs / k**2
sk = i_sig / k**2
ik.set_selected(ik < 0, 0)
p = (ik + 2 * i_calc) / 3
den = flex.pow2(sk) + flex.pow2(wa*p) + wb*p
assert den.all_gt(1e-8)
weights = 1 / den
return weights
def calc_t(i_obs, i_calc, k, weights):
delta = i_obs - k**2 * i_calc
t_num = flex.sum(weights * flex.pow2(delta))
t_den = flex.sum(weights * flex.pow2(i_obs))
assert t_den != 0
return t_num / t_den
def kwt(f_obs, i_obs, i_sig, f_calc, i_calc, wa, wb):
if (f_calc is not None):
i_calc = flex.norm(f_calc)
k = calc_k(f_obs, i_calc)
weights = calc_w(
wa=wa,
wb=wb,
i_obs=i_obs,
i_sig=i_sig,
i_calc=i_calc,
k=k)
t = calc_t(
i_obs=i_obs,
i_calc=i_calc,
k=k,
weights=weights)
return k, weights, t
def kwt2(f_obs, i_obs, i_sig, f_calc, i_calc, wa, wb):
k, weights, t = kwt(f_obs, i_obs, i_sig, f_calc, i_calc, wa, wb)
trg = cctbx.xray.targets.shelxl_wght_ls(
f_obs=f_obs,
i_obs=i_obs,
i_sig=i_sig,
f_calc=f_calc,
i_calc=i_calc,
wa=wa,
wb=wb)
assert approx_equal(trg.scale_factor, k)
assert approx_equal(trg.weights, weights)
assert approx_equal(trg.target, t)
return trg
def exercise(mt, n_refl):
f_obs = mt.random_double(size=n_refl)
i_obs = flex.pow2(f_obs)
i_sig = mt.random_double(size=i_obs.size())
f_calc = flex.complex_double(
mt.random_double(size=f_obs.size()),
mt.random_double(size=f_obs.size()))
i_calc = flex.norm(f_calc)
wa = 1.23
wb = 2.34
trg = kwt2(
f_obs=f_obs, i_obs=i_obs, i_sig=i_sig,
f_calc=f_calc, i_calc=None, wa=wa, wb=wb)
def check_i_derivs():
g_ana = trg.i_gradients
c_ana = trg.i_curvatures
eps = 1e-6
g_fin = flex.double()
c_fin = flex.double()
for ih in range(i_calc.size()):
fs = []
gs = []
c_orig = i_calc[ih]
for signed_eps in [eps, -eps]:
i_calc[ih] = c_orig + signed_eps
trg_eps = kwt2(
f_obs=f_obs, i_obs=i_obs, i_sig=i_sig,
f_calc=None, i_calc=i_calc, wa=wa, wb=wb)
fs.append(trg_eps.target)
gs.append(trg_eps.i_gradients[ih])
g_fin.append((fs[0]-fs[1])/(2*eps))
c_fin.append((gs[0]-gs[1])/(2*eps))
i_calc[ih] = c_orig
assert approx_equal(g_ana, g_fin)
assert approx_equal(c_ana, c_fin)
def check_f_derivs():
g_ana = trg.f_gradients
c_ana = trg.f_hessians
eps = 1e-6
g_fin = flex.complex_double()
c_fin = flex.vec3_double()
for ih in range(i_calc.size()):
c_orig = f_calc[ih]
g_fin_ab = []
c_fin_ab = []
for iab in [0,1]:
fs = []
gs = []
for signed_eps in [eps, -eps]:
if (iab == 0):
f_calc[ih] = complex(c_orig.real + signed_eps, c_orig.imag)
else:
f_calc[ih] = complex(c_orig.real, c_orig.imag + signed_eps)
trg_eps = kwt2(
f_obs=f_obs, i_obs=i_obs, i_sig=i_sig,
f_calc=f_calc, i_calc=None, wa=wa, wb=wb)
fs.append(trg_eps.target)
gs.append(trg_eps.f_gradients[ih])
g_fin_ab.append((fs[0]-fs[1])/(2*eps))
c_fin_ab.append((gs[0]-gs[1])/(2*eps))
g_fin.append(complex(*g_fin_ab))
assert approx_equal(c_fin_ab[0].imag, c_fin_ab[1].real)
c_fin.append((c_fin_ab[0].real, c_fin_ab[1].imag, c_fin_ab[0].imag))
f_calc[ih] = c_orig
assert approx_equal(g_ana, g_fin)
assert approx_equal(c_ana, c_fin)
check_i_derivs()
check_f_derivs()
def run(args):
assert len(args) < 3
arg_vals = [int(arg) for arg in args]
arg_vals = arg_vals + [3, 2][len(arg_vals):]
n_refl, n_trials = arg_vals
assert n_refl > 0
assert n_trials > 0
mt = flex.mersenne_twister(seed=0)
for i_trial in range(n_trials):
exercise(mt, n_refl)
print("OK")
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
|
137553
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.IsolationAlgos.tkIsoDeposits_cff import *
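# Track-isolation producers: sum ECAL / HCAL deposits (from
# tkIsoDepositCalByAssociatorTowers) in a cone of size 0.3 around tracks with
# pT above 20, for the goodTracks / highPtTracks collections.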
EcalIsolationForTracks = cms.EDProducer("IsolationProducerForTracks",
highPtTracks = cms.InputTag("highPtTracks"),
tracks = cms.InputTag("goodTracks"),
isoDeps = cms.InputTag("tkIsoDepositCalByAssociatorTowers","ecal"),
coneSize = cms.double(0.3),
trackPtMin = cms.double(20.0)
)
HcalIsolationForTracks = cms.EDProducer("IsolationProducerForTracks",
highPtTracks = cms.InputTag("highPtTracks"),
tracks = cms.InputTag("goodTracks"),
isoDeps = cms.InputTag("tkIsoDepositCalByAssociatorTowers","hcal"),
coneSize = cms.double(0.3),
trackPtMin = cms.double(20.0)
)
highPtTrackIsolations = cms.Sequence(tkIsoDeposits+EcalIsolationForTracks+HcalIsolationForTracks)
|
137673
|
import os
import sys
import mxnet as mx
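# Build CIFAR-100 train/val ImageRecordIter pairs from pre-generated RecordIO
# files; random crop (with 4-pixel padding) and mirroring are enabled when
# cfg.dataset.aug_level > 0, and the data is sharded across kvstore workers.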
def cifar100_iterator(cfg, kv):
train_rec = os.path.join(cfg.dataset.data_dir, "cifar100_train.rec")
val_rec = os.path.join(cfg.dataset.data_dir, "cifar100_test.rec")
mean = [129.31, 124.11, 112.4]
std = [68.21, 65.41, 70.41]
train = mx.io.ImageRecordIter(
path_imgrec = train_rec,
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
data_shape = (3, 32, 32),
batch_size = cfg.batch_size,
pad = 4,
fill_value = 127,
#mean_r = mean[0],
#mean_g = mean[1],
#mean_b = mean[2],
#std_r = std[0],
#std_g = std[1],
#std_b = std[2],
rand_crop = True if cfg.dataset.aug_level > 0 else False,
rand_mirror = True if cfg.dataset.aug_level > 0 else False,
shuffle = True if cfg.dataset.aug_level >= 0 else False,
num_parts = kv.num_workers,
part_index = kv.rank)
val = mx.io.ImageRecordIter(
path_imgrec = val_rec,
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
batch_size = cfg.batch_size,
data_shape = (3, 32, 32),
mean_r = mean[0],
#mean_g = mean[1],
#mean_b = mean[2],
#std_r = std[0],
#std_g = std[1],
#std_b = std[2],
rand_crop = False,
rand_mirror = False,
num_parts = kv.num_workers,
part_index = kv.rank)
return train, val
|