146879
|
import re
# https://developer.mozilla.org/en-US/docs/Web/HTML/Inline_elements#Elements
INLINE_TAGS = {
'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'br', 'button', 'cite',
'code', 'dfn', 'em', 'i', 'img', 'input', 'kbd', 'label', 'map',
'object', 'q', 'samp', 'script', 'select', 'small', 'span', 'strong',
'sub', 'sup', 'textarea', 'time', 'tt', 'var'
}
SEPARATORS = {'br'}
# Definition of whitespace in HTML:
# https://www.w3.org/TR/html4/struct/text.html#h-9.1
WHITESPACE_RE = re.compile(u'[\x20\x09\x0C\u200B\x0A\x0D]+')
def squash_html_whitespace(text):
    # Collapse runs of HTML whitespace into a single space. Do not apply this
    # to preformatted content (<pre> elements, or whitespace preserved by CSS
    # rules); use the raw extracted text for those.
return WHITESPACE_RE.sub(' ', text)
def _squash_artificial_nl(parts):
output, last_nl = [], False
for x in parts:
if x is not None:
output.append(x)
last_nl = False
elif not last_nl:
output.append(None)
last_nl = True
return output
def _strip_artificial_nl(parts):
if not parts:
return parts
for start_idx, pt in enumerate(parts):
if isinstance(pt, str):
            # start_idx now points at the first string element; slice from here
break
iterator = enumerate(parts[:start_idx - 1 if start_idx > 0 else None:-1])
for end_idx, pt in iterator:
        if isinstance(pt, str):  # end_idx counts back from the end to the last string
break
return parts[start_idx:-end_idx if end_idx > 0 else None]
def _merge_original_parts(parts):
output, orp_buf = [], []
def flush():
if orp_buf:
item = squash_html_whitespace(''.join(orp_buf)).strip()
if item:
output.append(item)
orp_buf[:] = []
for x in parts:
if not isinstance(x, str):
flush()
output.append(x)
else:
orp_buf.append(x)
flush()
return output
def extract_text_array(dom, squash_artificial_nl=True, strip_artificial_nl=True):
if callable(dom.tag):
return ''
r = []
if dom.tag in SEPARATORS:
r.append(True) # equivalent of '\n' used to designate separators
elif dom.tag not in INLINE_TAGS:
        # equivalent of '\n' used to designate artificially inserted newlines
r.append(None)
if dom.text is not None:
r.append(dom.text)
for child in dom.getchildren():
        r.extend(extract_text_array(child, squash_artificial_nl=False,
                                    strip_artificial_nl=False))
if child.tail is not None:
r.append(child.tail)
if dom.tag not in INLINE_TAGS and dom.tag not in SEPARATORS:
        # equivalent of '\n' used to designate artificially inserted newlines
r.append(None)
    if squash_artificial_nl:
        r = _squash_artificial_nl(r)
    if strip_artificial_nl:
        r = _strip_artificial_nl(r)
return r
def extract_text(dom, block_symbol='\n', sep_symbol='\n', squash_space=True):
    a = extract_text_array(dom, squash_artificial_nl=squash_space)
if squash_space:
        a = _strip_artificial_nl(_squash_artificial_nl(_merge_original_parts(a)))
result = ''.join(
block_symbol if x is None else (
sep_symbol if x is True else x
)
for x in a
)
if squash_space:
result = result.strip()
return result
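# Usage sketch (assumption: the `dom` argument is an lxml element -- the
# functions above rely on the lxml API: .tag, .text, .tail, .getchildren()).
if __name__ == '__main__':
    from lxml import html
    doc = html.fromstring('<div><p>Hello <b>world</b></p><p>Bye</p></div>')
    # Block boundaries become newlines: 'Hello world\nBye'
    print(extract_text(doc))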
|
146915
|
import json
from PIL import Image
import torch
from torchvision.transforms import ToTensor
from codes.datasets.MVM3D import *
import warnings
from codes.EX_CONST import Const
warnings.filterwarnings("ignore")
class MVM3D_loader(VisionDataset):
def __init__(self, base, train=True, transform=ToTensor(), target_transform=ToTensor(),
reID=False, grid_reduce=Const.reduce, img_reduce=Const.reduce):
super().__init__(base.root, transform=transform, target_transform=target_transform)
self.reID, self.grid_reduce, self.img_reduce = reID, grid_reduce, img_reduce
self.base = base
self.train = train
self.root, self.num_cam = base.root, base.num_cam
self.img_shape, self.worldgrid_shape = base.img_shape, base.worldgrid_shape # H,W; N_row,N_col
self.reducedgrid_shape = list(map(lambda x: int(x / self.grid_reduce), self.worldgrid_shape))
self.extrinsic_matrix = base.extrinsic_matrices
self.intrinsic_matrix = base.intrinsic_matrices
        # split the dataset into train/val/test frame ranges according to the train flag
if train == 1:
frame_range = list(range(0, 1800)) + list(range(2100, 3500)) + list(range(3600, 4330))
elif train == 3:
            frame_range = list(range(2000, 2100)) + list(range(3500, 3600))
elif train == 2:
frame_range = list(range(1800, 2100)) + list(range(3500, 3600))
elif train == 4:
frame_range = list(range(0, 1625))
self.upsample_shape = list(map(lambda x: int(x / self.img_reduce), self.img_shape))
img_reduce_local = np.array(self.img_shape) / np.array(self.upsample_shape)
imgcoord2worldgrid_matrices = get_imgcoord2worldgrid_matrices(base.intrinsic_matrices,
base.extrinsic_matrices,
base.worldgrid2worldcoord_mat)
img_zoom_mat = np.diag(np.append(img_reduce_local, [1]))
map_zoom_mat = np.diag(np.append(np.ones([2]) / self.grid_reduce, [1]))
self.proj_mats = [torch.from_numpy(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat)
for cam in range(2)]
# create angle bins
bins = Const.bins
overlap = 0.1
self.bins = bins
self.angle_bins = np.zeros(bins)
self.interval = 2 * np.pi / bins
for i in range(1, bins):
self.angle_bins[i] = i * self.interval
self.angle_bins += self.interval / 2 # center of the bin
self.overlap = overlap
# ranges for confidence
self.bin_ranges = []
for i in range(0, bins):
            self.bin_ranges.append(((i * self.interval - overlap) % (2 * np.pi),
                                    (i * self.interval + self.interval + overlap) % (2 * np.pi)))
self.bev_bboxes = {}
self.left_bboxes = {}
self.right_bboxes = {}
self.left_dir = {}
self.right_dir = {}
self.left_angle = {}
self.right_angle = {}
self.left_orientation = {}
self.left_conf = {}
self.right_orientation = {}
self.right_conf = {}
self.world_xy = {}
self.bev_angle = {}
self.mark = {}
self.img_fpaths = self.base.get_image_fpaths(frame_range)
if train:
self.gt_fpath = os.path.join(self.root, 'res/train_gt.txt')
else:
self.gt_fpath = os.path.join(self.root, 'res/test_gt.txt')
self.prepare_gt(frame_range)
self.prepare_bbox(frame_range)
self.prepare_dir(frame_range)
self.prepare_bins(frame_range)
def get_bin(self, angle):
bin_idxs = []
        def is_between(lo, hi, angle):
            hi = (hi - lo) if (hi - lo) > 0 else (hi - lo) + 2 * np.pi
            angle = (angle - lo) if (angle - lo) > 0 else (angle - lo) + 2 * np.pi
            return angle < hi
for bin_idx, bin_range in enumerate(self.bin_ranges):
if is_between(bin_range[0], bin_range[1], angle):
bin_idxs.append(bin_idx)
return bin_idxs
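    # Worked example: with bins == 2 and overlap == 0.1, interval == pi and
    # bin_ranges == [(2*pi - 0.1, pi + 0.1), (pi - 0.1, 0.1)]; an angle of
    # 0.05 rad falls in the overlap near the bin boundary, so get_bin returns
    # both indices [0, 1], while an angle well inside one bin returns a single index.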
def prepare_bins(self, frame_range):
for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
frame_left_dir = []
frame_right_dir = []
frame_left_ang = []
frame_right_ang = []
frame_wxy = []
frame_bev_angle = []
frame_left_orientation = []
frame_left_conf = []
frame_right_orientation = []
frame_right_conf = []
frame = int(fname.split('.')[0])
if frame in frame_range:
with open(os.path.join(self.root, 'annotations', fname)) as json_file:
cars = [json.load(json_file)][0]
for i, car in enumerate(cars):
wx = int(car["wx"]) // 10
wy = int(car["wy"]) // 10
mk = int(car["mark"])
# left_dir = int(car["direc_left"])
# right_dir = int(car["direc_right"])
left_dir = 0
right_dir = 0
bev_angle = float(car["angle"])
frame_wxy.append([wx, wy])
if Const.roi_classes != 1:
frame_left_dir.append(left_dir)
frame_right_dir.append(right_dir)
else:
frame_left_dir.append(0)
frame_right_dir.append(0)
# 0~360
if bev_angle < 0:
bev_angle += 2 * np.pi
                        # left-view angle label
alpha = np.arctan((Const.grid_height - wy) / wx)
left_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
# if frame in range(500, 600) and i == 2:
# print(wx, wy)
# print(np.rad2deg(bev_angle))
# print(np.rad2deg(alpha))
# print(np.rad2deg(left_target))
# print(np.arctan(np.sin(left_target) / np.cos(left_target)))
                        # frame_left_ang.append([np.sin(left_target), np.cos(left_target)])  # option 1: regress sin/cos
left_orientation = np.zeros((self.bins, 2))
left_confidence = np.zeros(self.bins)
left_bin_idxs = self.get_bin(left_target)
for bin_idx in left_bin_idxs:
angle_diff = left_target - self.angle_bins[bin_idx]
left_orientation[bin_idx, :] = np.array([np.cos(angle_diff), np.sin(angle_diff)])
left_confidence[bin_idx] = 1
# print("left conf", left_confidence)
frame_left_orientation.append(left_orientation)
frame_left_conf.append(left_confidence)
                        # right-view angle label; flip the positive direction
bev_angle -= np.pi
if bev_angle < 0:
bev_angle += 2 * np.pi
frame_bev_angle.append(bev_angle)
alpha = np.arctan(wy / (Const.grid_width - wx))
right_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
                        # frame_right_ang.append([np.sin(right_target), np.cos(right_target)])  # option 1: regress sin/cos
right_orientation = np.zeros((self.bins, 2))
right_confidence = np.zeros(self.bins)
right_bin_idxs = self.get_bin(right_target)
for bin_idx in right_bin_idxs:
angle_diff = right_target - self.angle_bins[bin_idx]
right_orientation[bin_idx, :] = np.array([np.cos(angle_diff), np.sin(angle_diff)])
right_confidence[bin_idx] = 1
# print("right conf", right_confidence)
frame_right_orientation.append(right_orientation)
frame_right_conf.append(right_confidence)
# print(frame_left_orientation)
self.left_orientation[frame] = frame_left_orientation
self.left_conf[frame] = frame_left_conf
self.right_orientation[frame] = frame_right_orientation
self.right_conf[frame] = frame_right_conf
def prepare_gt(self,frame_range):
og_gt = []
for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
frame = int(fname.split('.')[0])
if frame in frame_range:
with open(os.path.join(self.root, 'annotations', fname)) as json_file:
all_pedestrians = [json.load(json_file)][0]
for single_pedestrian in all_pedestrians:
def is_in_cam(cam):
return not (single_pedestrian['views'][cam]['xmin'] == -1 and
single_pedestrian['views'][cam]['xmax'] == -1 and
single_pedestrian['views'][cam]['ymin'] == -1 and
single_pedestrian['views'][cam]['ymax'] == -1)
in_cam_range = sum(is_in_cam(cam) for cam in range(self.num_cam))
if not in_cam_range:
continue
wx = single_pedestrian['wx']
wy = single_pedestrian['wy']
if wx > Const.grid_width * 10:
wx = Const.grid_width * 10 - 1
if wy > Const.grid_height * 10:
wy = Const.grid_height * 10 - 1
                        grid_x, grid_y = wx // 10, wy // 10
og_gt.append(np.array([frame, grid_x, grid_y]))
og_gt = np.stack(og_gt, axis=0)
os.makedirs(os.path.dirname(self.gt_fpath), exist_ok=True)
print(self.gt_fpath)
np.savetxt(self.gt_fpath, og_gt, '%d')
def prepare_bbox(self, frame_range):
for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
frame_bev_box = []
frame_left_box = []
frame_right_box = []
frame = int(fname.split('.')[0])
if frame in frame_range:
with open(os.path.join(self.root, 'annotations', fname)) as json_file:
cars = [json.load(json_file)][0]
for i, car in enumerate(cars):
ymin_od = int(car["ymin_od"])
xmin_od = int(car["xmin_od"])
ymax_od = int(car["ymax_od"])
xmax_od = int(car["xmax_od"])
frame_bev_box.append([ymin_od, xmin_od, ymax_od, xmax_od])
for j in range(self.num_cam):
ymin = car["views"][j]["ymin"]
xmin = car["views"][j]["xmin"]
ymax = car["views"][j]["ymax"]
xmax = car["views"][j]["xmax"]
if j == 0:
frame_left_box.append([ymin, xmin, ymax, xmax])
else:
frame_right_box.append([ymin, xmin, ymax, xmax])
self.bev_bboxes[frame] = frame_bev_box
self.left_bboxes[frame] = frame_left_box
self.right_bboxes[frame] = frame_right_box
def prepare_dir(self, frame_range):
for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
frame_left_dir = []
frame_right_dir = []
frame_left_ang = []
frame_right_ang = []
frame_wxy = []
frame_bev_angle = []
frame = int(fname.split('.')[0])
if frame in frame_range:
with open(os.path.join(self.root, 'annotations', fname)) as json_file:
cars = [json.load(json_file)][0]
for i, car in enumerate(cars):
wx = int(car["wx"]) // 10
wy = int(car["wy"]) // 10
mk = int(car["mark"])
# left_dir = int(car["direc_left"])
# right_dir = int(car["direc_right"])
left_dir = 0
right_dir = 0
bev_angle = float(car["angle"])
frame_wxy.append([wx, wy])
if Const.roi_classes != 1:
frame_left_dir.append(left_dir)
frame_right_dir.append(right_dir)
else:
frame_left_dir.append(0)
frame_right_dir.append(0)
# 0~360
if bev_angle < 0:
bev_angle += 2 * np.pi
                        # left-view angle label
alpha = np.arctan((Const.grid_height - wy) / wx)
left_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
# if frame in range(500, 600) and i == 2:
# print(wx, wy)
# print(np.rad2deg(bev_angle))
# print(np.rad2deg(alpha))
# print(np.rad2deg(left_target))
# print(np.arctan(np.sin(left_target) / np.cos(left_target)))
                        frame_left_ang.append([np.sin(left_target), np.cos(left_target)])  # option 1: regress sin/cos
                        # right-view angle label; flip the positive direction
bev_angle -= np.pi
if bev_angle < 0:
bev_angle += 2 * np.pi
frame_bev_angle.append(bev_angle)
alpha = np.arctan(wy / (Const.grid_width - wx))
right_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
                        frame_right_ang.append([np.sin(right_target), np.cos(right_target)])  # option 1: regress sin/cos
self.world_xy[frame] = frame_wxy
self.left_dir[frame] = frame_left_dir
self.right_dir[frame] = frame_right_dir
self.bev_angle[frame] = frame_bev_angle
self.left_angle[frame] = frame_left_ang
self.right_angle[frame] = frame_right_ang
self.mark[frame] = mk
def __getitem__(self, index):
frame = list(self.bev_bboxes.keys())[index]
imgs = []
for cam in range(self.num_cam):
fpath = self.img_fpaths[cam][frame]
img = Image.open(fpath).convert('RGB')
if self.transform is not None:
img = self.transform(img)
imgs.append(img)
imgs = torch.stack(imgs)
bev_bboxes = torch.tensor(self.bev_bboxes[frame])
left_bboxes = torch.tensor(self.left_bboxes[frame])
right_bboxes = torch.tensor(self.right_bboxes[frame])
left_dirs = torch.tensor(self.left_dir[frame])
right_dirs = torch.tensor(self.right_dir[frame])
left_angles = torch.tensor(self.left_angle[frame])
right_angles = torch.tensor(self.right_angle[frame])
        bev_xy = torch.tensor(self.world_xy[frame])
bev_angle = torch.tensor(self.bev_angle[frame])
mark = self.mark[frame]
left_orientation = torch.tensor(self.left_orientation[frame])
left_conf = torch.tensor(self.left_conf[frame])
right_orientation = torch.tensor(self.right_orientation[frame])
right_conf = torch.tensor(self.right_conf[frame])
return imgs, bev_xy, bev_angle, bev_bboxes, \
left_bboxes, right_bboxes,\
left_dirs, right_dirs, \
left_angles, right_angles, \
left_orientation, right_orientation, \
left_conf, right_conf, \
frame, \
self.extrinsic_matrix, self.intrinsic_matrix, \
mark
def __len__(self):
return len(self.bev_bboxes.keys())
def get_imgcoord2worldgrid_matrices(intrinsic_matrices, extrinsic_matrices, worldgrid2worldcoord_mat):
projection_matrices = {}
for cam in range(2):
worldcoord2imgcoord_mat = intrinsic_matrices[cam] @ np.delete(extrinsic_matrices[cam], 2, 1)
worldgrid2imgcoord_mat = worldcoord2imgcoord_mat @ worldgrid2worldcoord_mat
imgcoord2worldgrid_mat = np.linalg.inv(worldgrid2imgcoord_mat)
permutation_mat = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
projection_matrices[cam] = permutation_mat @ imgcoord2worldgrid_mat
return projection_matrices
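# Sketch of how one of these matrices can be applied: map an image pixel to
# world-grid coordinates through the 3x3 homography, with the usual
# perspective divide (proj_mat comes from get_imgcoord2worldgrid_matrices).
def img_pixel_to_worldgrid(proj_mat, u, v):
    p = proj_mat @ np.array([u, v, 1.0])
    return p[:2] / p[2]  # divide out the homogeneous coordinate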
if __name__ == "__main__":
data_path = os.path.expanduser('/home/dzc/Data/4carreal_0318blend')
world_shape = Const.grid_size
base = Robomaster_1_dataset(data_path, None, worldgrid_shape = world_shape)
dataset = oftFrameDataset(base)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
num_workers=8, pin_memory=True, drop_last=True)
left_result = np.zeros((36,))
right_result = np.zeros((36,))
for batch_idx, data in enumerate(data_loader):
# print(batch_idx)
imgs, bev_xy, bev_angle, gt_bbox, gt_left_bbox, gt_right_bbox, left_dirs, right_dirs, left_sincos, right_sincos, frame, extrin, intrin = data
for i in range(4):
sin = left_sincos.squeeze()[i, 0]
cos = left_sincos.squeeze()[i, 1]
angle = np.arctan(sin / cos)
if (sin > 0 and cos < 0) or (sin < 0 and cos < 0):
angle += np.pi
if sin < 0 and cos > 0:
angle += np.pi * 2
angle = np.rad2deg(angle)
left_result[int(angle.item() // 10)] += 1
if frame in range(600, 700) and i == 0:
print("------------------")
print(frame)
print(angle.item())
sin = right_sincos.squeeze()[i, 0]
cos = right_sincos.squeeze()[i, 1]
angle = np.arctan(sin / cos)
if (sin > 0 and cos < 0) or (sin < 0 and cos < 0):
angle += np.pi
if sin < 0 and cos > 0:
angle += np.pi * 2
angle = np.rad2deg(angle)
right_result[int(angle.item() // 10)] += 1
import matplotlib.pyplot as plt
X = np.arange(0, 36)
Y = left_result
fig = plt.figure()
plt.bar(X, Y, 0.4, color="green")
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title("left")
# plt.show()
# plt.savefig("/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/left_result.jpg")
X = np.arange(0, 36)
Y = right_result
fig = plt.figure()
plt.bar(X, Y, 0.4, color="green")
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title("right")
# plt.show()
# plt.savefig("/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/right_result.jpg")
|
146916
|
import os
import time
import logger
import random
import tensorflow as tf
import gym
import numpy as np
from collections import deque
from config import args
from utils import set_global_seeds, sf01, explained_variance
from agent import PPO
from env_wrapper import make_env
def main():
env = make_env()
set_global_seeds(env, args.seed)
agent = PPO(env=env)
batch_steps = args.n_envs * args.batch_steps # number of steps per update
if args.save_interval and logger.get_dir():
# some saving jobs
pass
ep_info_buffer = deque(maxlen=100)
t_train_start = time.time()
n_updates = args.n_steps // batch_steps
runner = Runner(env, agent)
for update in range(1, n_updates + 1):
t_start = time.time()
frac = 1.0 - (update - 1.0) / n_updates
        lr_now = args.lr  # could be annealed with frac
        clip_range_now = args.clip_range  # could be annealed with frac
obs, returns, masks, acts, vals, neglogps, advs, rewards, ep_infos = \
runner.run(args.batch_steps, frac)
ep_info_buffer.extend(ep_infos)
loss_infos = []
idxs = np.arange(batch_steps)
for _ in range(args.n_epochs):
np.random.shuffle(idxs)
for start in range(0, batch_steps, args.minibatch):
end = start + args.minibatch
mb_idxs = idxs[start: end]
minibatch = [arr[mb_idxs] for arr in [obs, returns, masks, acts, vals, neglogps, advs]]
loss_infos.append(agent.train(lr_now, clip_range_now, *minibatch))
t_now = time.time()
time_this_batch = t_now - t_start
if update % args.log_interval == 0:
ev = float(explained_variance(vals, returns))
logger.logkv('updates', str(update) + '/' + str(n_updates))
logger.logkv('serial_steps', update * args.batch_steps)
logger.logkv('total_steps', update * batch_steps)
logger.logkv('time', time_this_batch)
logger.logkv('fps', int(batch_steps / (t_now - t_start)))
logger.logkv('total_time', t_now - t_train_start)
logger.logkv("explained_variance", ev)
logger.logkv('avg_reward', np.mean([e['r'] for e in ep_info_buffer]))
logger.logkv('avg_ep_len', np.mean([e['l'] for e in ep_info_buffer]))
logger.logkv('adv_mean', np.mean(returns - vals))
logger.logkv('adv_variance', np.std(returns - vals)**2)
loss_infos = np.mean(loss_infos, axis=0)
for loss_name, loss_info in zip(agent.loss_names, loss_infos):
logger.logkv(loss_name, loss_info)
logger.dumpkvs()
if args.save_interval and update % args.save_interval == 0 and logger.get_dir():
pass
env.close()
class Runner:
def __init__(self, env, agent):
self.env = env
self.agent = agent
self.obs = np.zeros((args.n_envs,) + env.observation_space.shape, dtype=np.float32)
self.obs[:] = env.reset()
self.dones = [False for _ in range(args.n_envs)]
def run(self, batch_steps, frac):
b_obs, b_rewards, b_actions, b_values, b_dones, b_neglogps = [], [], [], [], [], []
ep_infos = []
for s in range(batch_steps):
actions, values, neglogps = self.agent.step(self.obs, self.dones)
b_obs.append(self.obs.copy())
b_actions.append(actions)
b_values.append(values)
b_neglogps.append(neglogps)
b_dones.append(self.dones)
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeinfo = info.get('episode')
if maybeinfo:
ep_infos.append(maybeinfo)
b_rewards.append(rewards)
# batch of steps to batch of rollouts
b_obs = np.asarray(b_obs, dtype=self.obs.dtype)
b_rewards = np.asarray(b_rewards, dtype=np.float32)
b_actions = np.asarray(b_actions)
b_values = np.asarray(b_values, dtype=np.float32)
b_neglogps = np.asarray(b_neglogps, dtype=np.float32)
        b_dones = np.asarray(b_dones, dtype=bool)
last_values = self.agent.get_value(self.obs, self.dones)
b_returns = np.zeros_like(b_rewards)
b_advs = np.zeros_like(b_rewards)
lastgaelam = 0
for t in reversed(range(batch_steps)):
if t == batch_steps - 1:
mask = 1.0 - self.dones
nextvalues = last_values
else:
mask = 1.0 - b_dones[t + 1]
nextvalues = b_values[t + 1]
delta = b_rewards[t] + args.gamma * nextvalues * mask - b_values[t]
b_advs[t] = lastgaelam = delta + args.gamma * args.lam * mask * lastgaelam
b_returns = b_advs + b_values
return (*map(sf01, (b_obs, b_returns, b_dones, b_actions, b_values, b_neglogps, b_advs, b_rewards)), ep_infos)
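# Standalone sketch of the GAE(lambda) recursion used in Runner.run above
# (gamma and lam stand in for args.gamma and args.lam; inputs are per-timestep arrays):
def gae_reference(rewards, values, next_values, next_nonterminal, gamma=0.99, lam=0.95):
    advs = np.zeros_like(rewards)
    lastgaelam = 0.0
    for t in reversed(range(len(rewards))):
        # one-step TD error, masked to zero across episode boundaries
        delta = rewards[t] + gamma * next_values[t] * next_nonterminal[t] - values[t]
        advs[t] = lastgaelam = delta + gamma * lam * next_nonterminal[t] * lastgaelam
    return advs, advs + values  # advantages and returns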
if __name__ == '__main__':
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger.configure()
main()
|
146957
|
from dataclasses import dataclass
@dataclass
class DiversityFilterParameters:
name: str
minscore: float = 0.4
bucket_size: int = 25
minsimilarity: float = 0.4
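# Construction sketch (the filter name below is a hypothetical example value):
#
#   params = DiversityFilterParameters(name="NoFilter")
#   assert (params.minscore, params.bucket_size, params.minsimilarity) == (0.4, 25, 0.4)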
|
147009
|
rest_api_version = 99
extensions = dict(
required_params=['training_frame', 'x'],
validate_required_params="",
set_required_params="""
parms$training_frame <- training_frame
if(!missing(x))
parms$ignored_columns <- .verify_datacols(training_frame, x)$cols_ignore
""",
with_model="""
model@model$aggregated_frame_id <- model@model$output_frame$name
""",
module="""
#' Retrieve an aggregated frame from an Aggregator model
#'
#' Retrieve an aggregated frame from the Aggregator model and use it to create a new frame.
#'
#' @param model an \linkS4class{H2OClusteringModel} returned from a \code{h2o.aggregator} call.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' df <- h2o.createFrame(rows = 100,
#' cols = 5,
#' categorical_fraction = 0.6,
#' integer_fraction = 0,
#' binary_fraction = 0,
#' real_range = 100,
#' integer_range = 100,
#' missing_fraction = 0)
#' target_num_exemplars = 1000
#' rel_tol_num_exemplars = 0.5
#' encoding = "Eigen"
#' agg <- h2o.aggregator(training_frame = df,
#' target_num_exemplars = target_num_exemplars,
#' rel_tol_num_exemplars = rel_tol_num_exemplars,
#' categorical_encoding = encoding)
#' # Use the aggregated frame to create a new dataframe
#' new_df <- h2o.aggregated_frame(agg)
#' }
#' @export
h2o.aggregated_frame <- function(model) {
key <- model@model$aggregated_frame_id
h2o.getFrame(key)
}
""",
)
doc = dict(
preamble="""
Build an Aggregated Frame
Builds an Aggregated Frame of an H2OFrame.
""",
params=dict(
x="""A vector containing the \code{character} names of the predictors in the model."""
),
examples="""
library(h2o)
h2o.init()
df <- h2o.createFrame(rows = 100,
cols = 5,
categorical_fraction = 0.6,
integer_fraction = 0,
binary_fraction = 0,
real_range = 100,
integer_range = 100,
missing_fraction = 0)
target_num_exemplars = 1000
rel_tol_num_exemplars = 0.5
encoding = "Eigen"
agg <- h2o.aggregator(training_frame = df,
target_num_exemplars = target_num_exemplars,
rel_tol_num_exemplars = rel_tol_num_exemplars,
categorical_encoding = encoding)
"""
)
|
147024
|
import os
def get_n_running_proc(procs):
statuses = [proc.poll() for proc in procs]
n_proc = sum([1 for st in statuses if st is None]) # None from proc.poll() means that process is still running
return n_proc
def get_n_gpu_proc(gpu):
gpu_command = """nvidia-smi -g """ + str(gpu) + """ | awk '$2=="Processes:" {p=1} p && $3 > 0 {print $3}'"""
output = os.popen(gpu_command).read()
gpu_procs = [s for s in output.split('\n') if s not in ['GPU', 'PID', '', '0', 'running']]
return len(gpu_procs)
def get_free_gpu():
gpus = list(range(8))
max_n_per_gpu = 1
for gpu in gpus:
if get_n_gpu_proc(gpu) < max_n_per_gpu:
return gpu
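# Usage sketch: pin a new job to the first free GPU by setting
# CUDA_VISIBLE_DEVICES before spawning it ('train.py' is a placeholder command).
if __name__ == '__main__':
    import subprocess
    gpu = get_free_gpu()
    if gpu is not None:
        env = dict(os.environ, CUDA_VISIBLE_DEVICES=str(gpu))
        proc = subprocess.Popen(['python', 'train.py'], env=env)
        print('launched on GPU', gpu, 'with pid', proc.pid)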
|
147029
|
import pytest
import numpy as np
def assert_equal(arr, arr2):
assert np.array_equal(arr, arr2)
assert arr.dtype == arr2.dtype
def test_bulk_importer_ndarray(repo):
from hangar.bulk_importer import run_bulk_import
from hangar.bulk_importer import UDF_Return
def make_ndarray(column, key, shape, dtype, multiplier):
size = np.prod(shape)
arr = np.arange(size, dtype=dtype).reshape(shape) * multiplier
yield UDF_Return(column=column, key=key, data=arr)
co = repo.checkout(write=True)
co.add_ndarray_column('arr', shape=(5, 5), dtype=np.uint32)
co.commit('first')
co.close()
kwargs = []
expected_kv = []
for idx in range(200):
_kw_dict = {
'column': 'arr',
'key': idx,
'shape': (5, 5),
'dtype': np.uint32,
'multiplier': idx
}
kwargs.append(_kw_dict)
for _udf_val in make_ndarray(**_kw_dict):
expected_kv.append(_udf_val)
assert len(expected_kv) == 200
run_bulk_import(
repo,
branch_name='master',
column_names=['arr'],
udf=make_ndarray,
udf_kwargs=kwargs,
ncpus=2)
co = repo.checkout()
try:
arr_col = co['arr']
assert len(arr_col) == 200
for _expected_udf_val in expected_kv:
assert _expected_udf_val.key in arr_col
assert_equal(arr_col[_expected_udf_val.key], _expected_udf_val.data)
finally:
co.close()
def test_bulk_importer_pystr(repo):
from hangar.bulk_importer import run_bulk_import
from hangar.bulk_importer import UDF_Return
def make_pystr(column, key, str_val):
yield UDF_Return(column=column, key=key, data=str_val)
co = repo.checkout(write=True)
co.add_str_column('str')
co.commit('first')
co.close()
kwargs = []
expected_kv = []
for idx in range(200):
_kw_dict = {
'column': 'str',
'key': idx,
'str_val': f'{str(idx) * 2}',
}
kwargs.append(_kw_dict)
for _udf_val in make_pystr(**_kw_dict):
expected_kv.append(_udf_val)
assert len(expected_kv) == 200
run_bulk_import(
repo,
branch_name='master',
column_names=['str'],
udf=make_pystr,
udf_kwargs=kwargs,
ncpus=2)
co = repo.checkout()
try:
str_col = co['str']
assert len(str_col) == 200
for _expected_udf_val in expected_kv:
assert _expected_udf_val.key in str_col
assert str_col[_expected_udf_val.key] == _expected_udf_val.data
finally:
co.close()
def test_bulk_importer_pybytes(repo):
from hangar.bulk_importer import run_bulk_import
from hangar.bulk_importer import UDF_Return
def make_pybytes(column, key, str_val):
raw = str_val.encode()
yield UDF_Return(column=column, key=key, data=raw)
co = repo.checkout(write=True)
co.add_bytes_column('bytes')
co.commit('first')
co.close()
kwargs = []
expected_kv = []
for idx in range(200):
_kw_dict = {
'column': 'bytes',
'key': idx,
'str_val': f'{str(idx) * 2}',
}
kwargs.append(_kw_dict)
for _udf_val in make_pybytes(**_kw_dict):
expected_kv.append(_udf_val)
assert len(expected_kv) == 200
run_bulk_import(
repo,
branch_name='master',
column_names=['bytes'],
udf=make_pybytes,
udf_kwargs=kwargs,
ncpus=2)
co = repo.checkout()
try:
bytes_col = co['bytes']
assert len(bytes_col) == 200
for _expected_udf_val in expected_kv:
assert _expected_udf_val.key in bytes_col
assert bytes_col[_expected_udf_val.key] == _expected_udf_val.data
finally:
co.close()
def test_bulk_importer_two_col_pybytes_pystr(repo):
from hangar.bulk_importer import run_bulk_import
from hangar.bulk_importer import UDF_Return
def _make_pystr(column, key, str_val):
yield UDF_Return(column=column, key=key, data=str_val)
def _make_pybytes(column, key, str_val):
raw = str_val.encode()
yield UDF_Return(column=column, key=key, data=raw)
def make_pystr_pybytes(str_col, bytes_col, key, str_val):
yield from _make_pystr(column=str_col, key=key, str_val=str_val)
yield from _make_pybytes(column=bytes_col, key=key, str_val=str_val)
co = repo.checkout(write=True)
co.add_bytes_column('bytes')
co.add_str_column('str')
co.commit('first')
co.close()
kwargs = []
expected_kv = []
for idx in range(200):
_kw_dict = {
'str_col': 'str',
'bytes_col': 'bytes',
'key': idx,
'str_val': f'{str(idx) * 2}',
}
kwargs.append(_kw_dict)
for _udf_val in make_pystr_pybytes(**_kw_dict):
expected_kv.append(_udf_val)
assert len(expected_kv) == 400
run_bulk_import(
repo,
branch_name='master',
column_names=['bytes', 'str'],
udf=make_pystr_pybytes,
udf_kwargs=kwargs,
ncpus=2)
co = repo.checkout()
try:
pybytes_col = co['bytes']
pystr_col = co['str']
assert len(pybytes_col) == 200
assert len(pystr_col) == 200
for _expected_udf_val in expected_kv:
assert _expected_udf_val.column in ['str', 'bytes']
if _expected_udf_val.column == 'str':
assert _expected_udf_val.key in pystr_col
assert pystr_col[_expected_udf_val.key] == _expected_udf_val.data
elif _expected_udf_val.column == 'bytes':
                assert _expected_udf_val.key in pybytes_col
assert pybytes_col[_expected_udf_val.key] == _expected_udf_val.data
else:
raise ValueError(_expected_udf_val.column)
finally:
co.close()
def test_signature_wrong(repo):
from hangar.bulk_importer import run_bulk_import
from hangar.bulk_importer import UDF_Return
def wrong_sig_udf(a, b, c=None):
yield UDF_Return(column='str', key=a, data=f'{a} {b} {c}')
co = repo.checkout(write=True)
co.add_str_column('str')
co.commit('first')
co.close()
kwargs = []
for idx in range(200):
_kw_dict = {
'a': 'bytes',
'str_val': f'{str(idx) * 2}',
}
kwargs.append(_kw_dict)
with pytest.raises(TypeError):
run_bulk_import(
repo,
branch_name='master',
column_names=['str'],
udf=wrong_sig_udf,
udf_kwargs=kwargs,
ncpus=2)
|
147062
|
from typing import Dict, Optional
from maggma.builders.map_builder import MapBuilder
from maggma.core import Store
from pymatgen.core.structure import Structure
from emmet.core.robocrys import RobocrystallogapherDoc
from emmet.core.utils import jsanitize
class RobocrystallographerBuilder(MapBuilder):
def __init__(
self,
oxidation_states: Store,
robocrys: Store,
query: Optional[Dict] = None,
**kwargs
):
self.oxidation_states = oxidation_states
self.robocrys = robocrys
self.kwargs = kwargs
self.robocrys.key = "material_id"
self.oxidation_states.key = "material_id"
super().__init__(
source=oxidation_states,
target=robocrys,
query=query,
projection=["material_id", "structure", "deprecated"],
**kwargs
)
def unary_function(self, item):
structure = Structure.from_dict(item["structure"])
mpid = item["material_id"]
deprecated = item["deprecated"]
doc = RobocrystallogapherDoc.from_structure(
structure=structure,
material_id=mpid,
deprecated=deprecated,
fields=[],
)
return jsanitize(doc.dict(), allow_bson=True)
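# Wiring sketch, assuming maggma's MemoryStore for both stores (collection
# names here are hypothetical):
#
#   from maggma.stores import MemoryStore
#   source = MemoryStore("oxidation_states", key="material_id")
#   target = MemoryStore("robocrys", key="material_id")
#   builder = RobocrystallographerBuilder(oxidation_states=source, robocrys=target)
#   builder.run()  # feeds each source document through unary_function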
|
147074
|
import unittest
from unittest import TestCase
from transformers import BertConfig, BertForQuestionAnswering
from nn_pruning.model_structure import BertStructure
from nn_pruning.modules.masked_nn import (
    ChannelPruningModulePatcher,
    JointPruningModulePatcher,
    LinearPruningArgs,
    LinearPruningModulePatcher,
)
from nn_pruning.training_patcher import LinearModelPatcher, PatcherContext
class TestFun(TestCase):
MODEL_STRUCTURE = BertStructure
def test_base(self):
config = BertConfig.from_pretrained("bert-base-uncased")
model = BertForQuestionAnswering(config)
patcher = LinearModelPatcher({}, self.MODEL_STRUCTURE)
layers = patcher.get_patchable_layers(model)
# for regexp, layers in layers.items():
# print(regexp)
def test_patch_module_independent_parameters(self):
config = BertConfig.from_pretrained("bert-base-uncased")
model = BertForQuestionAnswering(config)
parameters = LinearPruningArgs(
method="topK",
submethod="default",
ampere_method="disabled",
block_rows=32,
block_cols=32,
min_elements=0.005,
)
context = PatcherContext()
p = LinearPruningModulePatcher(context, parameters, self.MODEL_STRUCTURE)
module_patchers = dict(query=p, key=p, value=p, att_dense=p, interm_dense=p, output_dense=p)
patcher = LinearModelPatcher(module_patchers, self.MODEL_STRUCTURE)
patcher.patch(model)
self.assertEqual(patcher.stats["patched"], 72)
key_sizes = {k: len(v) for k, v in context.context_modules.items()}
self.assertEqual(key_sizes, {"mask": 72})
def test_patch_module_ampere(self):
config = BertConfig.from_pretrained("bert-base-uncased")
model = BertForQuestionAnswering(config)
parameters = LinearPruningArgs(
method="topK",
submethod="default",
ampere_method="annealing",
block_rows=32,
block_cols=32,
min_elements=0.005,
)
context = PatcherContext()
p = LinearPruningModulePatcher(context, parameters, self.MODEL_STRUCTURE)
module_patchers = dict(query=p, key=p, value=p, att_dense=p, interm_dense=p, output_dense=p)
patcher = LinearModelPatcher(module_patchers, self.MODEL_STRUCTURE)
patcher.patch(model)
self.assertEqual(patcher.stats["patched"], 72)
key_sizes = {k: len(v) for k, v in context.context_modules.items()}
self.assertEqual(key_sizes, {"ampere_mask": 72, "mask": 72})
def test_patch_module_tied_attention(self):
config = BertConfig.from_pretrained("bert-base-uncased")
model = BertForQuestionAnswering(config)
parameters = LinearPruningArgs(
method="topK",
submethod="default",
ampere_method="annealing",
block_rows=32,
block_cols=32,
min_elements=0.005,
)
context = PatcherContext()
p_attention = JointPruningModulePatcher(context, parameters, self.MODEL_STRUCTURE, "attention")
p_dense = LinearPruningModulePatcher(context, parameters, self.MODEL_STRUCTURE)
module_patchers = dict(
query=p_attention,
key=p_attention,
value=p_attention,
att_dense=p_dense,
interm_dense=p_dense,
output_dense=p_dense,
)
patcher = LinearModelPatcher(module_patchers, self.MODEL_STRUCTURE)
patcher.patch(model)
self.assertEqual(patcher.stats["patched"], 72)
key_sizes = {k: len(v) for k, v in context.context_modules.items()}
self.assertEqual(key_sizes, {"ampere_mask": 72, "mask": 48})
def test_patch_tiedattention_line_pruning(self):
config = BertConfig.from_pretrained("bert-base-uncased")
model = BertForQuestionAnswering(config)
parameters_attention = LinearPruningArgs(
method="topK",
submethod="default",
ampere_method="annealing",
block_rows=32,
block_cols=32,
min_elements=0.005,
)
parameters_dense = LinearPruningArgs(
method="topK", submethod="1d", ampere_method="annealing", block_rows=32, block_cols=32, min_elements=0.005
)
context = PatcherContext()
p_attention = JointPruningModulePatcher(context, parameters_attention, self.MODEL_STRUCTURE, suffix=".attention")
p_dense = ChannelPruningModulePatcher(context, parameters_dense, self.MODEL_STRUCTURE, suffix="dense")
module_patchers = dict(
query=p_attention,
key=p_attention,
value=p_attention,
att_dense=p_dense,
interm_dense=p_dense,
output_dense=p_dense,
)
patcher = LinearModelPatcher(module_patchers, self.MODEL_STRUCTURE)
patcher.patch(model)
self.assertEqual(patcher.stats["patched"], 72)
key_sizes = {k: len(v) for k, v in context.context_modules.items()}
for k, v in key_sizes.items():
print(k, v)
for k, v in context.context_modules.items():
print(k, v)
self.assertEqual(key_sizes, {"ampere_mask": 72, "mask": 12, "mask_1d": 48})
if __name__ == "__main__":
unittest.main()
|
147086
|
from .admin_feedback_list_view import AdminFeedbackListView
from .region_feedback_list_view import RegionFeedbackListView
from .admin_feedback_actions import (
mark_admin_feedback_as_read,
mark_admin_feedback_as_unread,
delete_admin_feedback,
)
from .region_feedback_actions import (
mark_region_feedback_as_read,
mark_region_feedback_as_unread,
delete_region_feedback,
)
|
147100
|
import torch
import numpy as np
from resnext import get_net, Conv, Bottleneck
config = dict()
config['flip'] = True
config['loss_idcs'] = [1]
net_type = 'resnext101'
config['net_type'] = net_type
input_size = [299, 299]
block = Conv
fwd_out = [64, 128, 256, 256, 256]
num_fwd = [2, 3, 3, 3, 3]
back_out = [64, 128, 256, 256]
num_back = [2, 3, 3, 3]
n = 1
hard_mining = 0
loss_norm = False
def get_model(shrink=1, noise=0):
net = get_net(input_size, block, fwd_out, num_fwd, back_out, num_back, n, shrink, noise, hard_mining, loss_norm)
return config, net
|
147114
|
import os
import numpy as np
from random import choices
from radar_scenes.sequence import get_training_sequences, get_validation_sequences, Sequence
from radar_scenes.labels import ClassificationLabel
from radar_scenes.evaluation import per_point_predictions_to_json, PredictionFileSchemas
class SemSegNetwork:
"""
This is a dummy class for a semantic segmentation neural network.
For training, it takes as input a point cloud X and per-point labels y.
The network then learns to predict a class label for each input point p in X.
However, an instance label (track id) is NOT predicted.
"""
def __init__(self):
self._y_true_test = None
def train(self, X, y):
"""
Dummy method for training the neural network.
:param X: training data. Shape (N_points, N_feat)
:param y: semantic class label for each point. Shape (N_batch, N_points)
:return: None
"""
pass
def predict(self, X):
"""
Predicts a class label for each point in X.
This is a mock method which simply uses the true class labels from self._y_true_test to generate
a prediction which is likely correct
:param X: validation data. Shape (N_points, N_feat)
:return: an array of shape (N_points, ) containing the predicted class labels
"""
y_pred = []
for y in self._y_true_test:
if ClassificationLabel(y) == ClassificationLabel.CAR:
proba_vector = [0.9, 0.01, 0.01, 0.03, 0.04, 0.01]
elif ClassificationLabel(y) == ClassificationLabel.PEDESTRIAN:
proba_vector = [0.01, 0.90, 0.06, 0.02, 0.00, 0.01]
elif ClassificationLabel(y) == ClassificationLabel.PEDESTRIAN_GROUP:
proba_vector = [0.015, 0.04, 0.88, 0.045, 0.01, 0.01]
elif ClassificationLabel(y) == ClassificationLabel.TWO_WHEELER:
proba_vector = [0.06, 0.04, 0.03, 0.84, 0.01, 0.02]
elif ClassificationLabel(y) == ClassificationLabel.LARGE_VEHICLE:
proba_vector = [0.05, 0.01, 0.02, 0.03, 0.88, 0.01]
else:
proba_vector = [0.02, 0.005, 0.005, 0.005, 0.005, 0.96]
yy = choices([0, 1, 2, 3, 4, 5], weights=proba_vector)[0]
y_pred.append(yy)
return y_pred
class InstSegNetwork(SemSegNetwork):
def __init__(self):
super().__init__()
self._y_inst_true = None
self.last_instance_id = 1
self.translation_dict = {}
def train(self, X, y, y_inst):
pass
def predict(self, X: np.ndarray):
"""
Prediction method of the instance segmentation mock.
A class label and an instance label is predicted for each detection in X.
:param X: Array holding the individual detections
:return: predicted class labels and predicted instance labels.
"""
y_pred = super().predict(X)
# the viewer treats instance ID = -1 as "no instance". Therefore, this is used as default value for the instance
# labels.
y_inst_pred = np.zeros(len(X), dtype=np.int32) - 1
# translate string uuids to integers
for tr_uuid in set(self._y_inst_true):
if tr_uuid not in self.translation_dict:
self.translation_dict[tr_uuid] = self.last_instance_id
self.last_instance_id += 1
# iterate over true instance labels and assign new labels as prediction
for idx, true_instance_id in enumerate(self._y_inst_true):
if (true_instance_id == b"" or true_instance_id == "") and y_pred[idx] != ClassificationLabel.STATIC.value:
# a detection without a true instance id but with a predicted class label of a dynamic object gets a
# new instance label
y_inst_pred[idx] = self.last_instance_id
self.last_instance_id += 1
else:
if np.random.random() < 0.1:
# with a 10% chance, a point gets a different track id than all other points of this object
y_inst_pred[idx] = self.last_instance_id
self.last_instance_id += 1
else:
# assign the same integer instance label to all other points of an instance
y_inst_pred[idx] = self.translation_dict[true_instance_id]
# set default instance id for all points with label "STATIC"
idx = np.where(np.array(y_pred) == ClassificationLabel.STATIC.value)[0]
y_inst_pred[idx] = -1
return y_pred, y_inst_pred
def features_from_radar_data(radar_data):
"""
Generate a feature vector for each detection in radar_data.
The spatial coordinates as well as the ego-motion compensated Doppler velocity and the RCS value are used.
:param radar_data: Input data
:return: numpy array with shape (len(radar_data), 4), contains the feature vector for each point
"""
X = np.zeros((len(radar_data), 4)) # construct feature vector
X[:, 0] = radar_data["x_cc"]
X[:, 1] = radar_data["y_cc"]
X[:, 2] = radar_data["vr_compensated"]
X[:, 3] = radar_data["rcs"]
return X
def train_data_generator(training_sequences: list, path_to_dataset: str, return_track_ids=False):
"""
Given a list of training sequence names and the path to the data set,
the sequences are loaded and from each sequence 5 scenes are randomly chosen and returned as training data
This is only a mock training data generator. A true generator would require some more work.
:param training_sequences: list of sequence names
:param path_to_dataset: path to the dataset on the hard drive
:param return_track_ids: If true, in addition to the feature vectors and class labels, also the track ids are
returned.
:return: feature vectors and true labels.
"""
for sequence_name in training_sequences:
try:
sequence = Sequence.from_json(os.path.join(path_to_dataset, "data", sequence_name, "scenes.json"))
except FileNotFoundError:
continue
timestamps = sequence.timestamps # obtain all time stamps available in the sequence
chosen_times = np.random.choice(timestamps, 5) # choose five of them randomly
for t in chosen_times: # iterate over the selected timestamps
scene = sequence.get_scene(t) # collect the data which belong to the current timestamp
radar_data = scene.radar_data # retrieve the radar data which belong to this scene
y_true = np.array([ClassificationLabel.label_to_clabel(x) for x in radar_data["label_id"]]) # map labels
valid_points = y_true != None # filter invalid points
y_true = y_true[valid_points] # keep only valid points
y_true = [x.value for x in y_true] # get value of enum type to work with integers
track_ids = radar_data["track_id"]
X = features_from_radar_data(radar_data[valid_points]) # construct feature vector
if return_track_ids:
yield X, y_true, track_ids
else:
yield X, y_true
def validation_data_generator(validation_sequences: list, path_to_dataset: str, return_track_ids=False):
"""
Similar to the mock training data generator, this generator method returns validation data.
:param validation_sequences: List of sequence names which should be used for validation of a classifier
:param path_to_dataset: path to the data set on the hard drive
:param return_track_ids: If true, in addition to the feature vectors and class labels, also the track ids are
returned.
:return: Feature vectors X, true labels y_true, detection uuids, the sequence name, and optionally the track_ids
"""
for sequence_name in validation_sequences:
try:
sequence = Sequence.from_json(os.path.join(path_to_dataset, "data", sequence_name, "scenes.json"))
except FileNotFoundError:
continue
for scene in sequence.scenes(): # iterate over all scenes in the sequence
radar_data = scene.radar_data # retrieve the radar data which belong to this scene
y_true = np.array([ClassificationLabel.label_to_clabel(x) for x in radar_data["label_id"]]) # map labels
valid_points = y_true != None # filter invalid points
y_true = y_true[valid_points] # keep only valid points
y_true = [x.value for x in y_true] # get value of enum type to work with integers
X = features_from_radar_data(radar_data[valid_points]) # construct feature vector
uuids = radar_data["uuid"][valid_points]
track_ids = radar_data["track_id"][valid_points]
if return_track_ids:
yield X, y_true, uuids, sequence_name, track_ids
else:
yield X, y_true, uuids, sequence_name
def main():
# MODIFY THIS LINE AND INSERT PATH WHERE YOU STORED THE RADARSCENES DATASET
path_to_dataset = "/home/USERNAME/datasets/RadarScenes"
sequence_file = os.path.join(path_to_dataset, "data", "sequences.json")
if not os.path.exists(sequence_file):
print("Please modify this example so that it contains the correct path to the dataset on your machine.")
return
# load sequences.json file and obtain list of sequences for training.
training_sequences = get_training_sequences(sequence_file)
# load sequences.json file and obtain list of sequences for validation.
validation_sequences = get_validation_sequences(sequence_file)
print("Found {} sequences for training and {} sequences for validation.".format(len(training_sequences),
len(validation_sequences)))
print("-" * 120)
print("Mocking a semantic segmentation network...")
classifier = SemSegNetwork()
# For this example, only a subset of the training/validation files is used.
# In a real application of course all files would be used
training_sequences = training_sequences[112:115]
validation_sequences = validation_sequences[23:24]
# training loop for the classifier
print("Training of mock-classifier...", end=" ", flush=True)
for X, y_true in train_data_generator(training_sequences, path_to_dataset):
classifier.train(X, y_true)
print("Done!")
# Validation loop
print("Evaluating trained classifier on validation data...", end=" ", flush=True)
predictions = {}
for X, y_true, uuids, sequence_name in validation_data_generator(validation_sequences, path_to_dataset):
if sequence_name not in predictions:
predictions[sequence_name] = {}
classifier._y_true_test = y_true # this is only used to set the internal data of our fake-classifier
y_pred = classifier.predict(X) # predict for each point in X a class label
for y, uid in zip(y_pred, uuids): # store predictions in a dictionary along with the uuid of the points
predictions[sequence_name][uid] = y
print("Done!")
current_dir = os.getcwd()
for sequence_name in predictions: # iterate over all unique sequences
name = os.path.splitext(sequence_name)[0]
output_name = os.path.join(current_dir, name + "_predictions.json") # create output name for this sequence
print("Writing predictions for sequence {} to file {}.".format(sequence_name, output_name))
# write predictions to json file. This file can be loaded with the GUI tool to visualize the predictions
per_point_predictions_to_json(predictions[sequence_name], output_name, ClassificationLabel.translation_dict(),
schema=PredictionFileSchemas.SemSeg)
print("Done with semantic segmentation!")
print("-" * 120)
print("\n")
print("Mocking an instance segmentation network...")
classifier = InstSegNetwork()
print("Training of mock-classifier...", end=" ", flush=True)
for X, y_true, y_inst in train_data_generator(training_sequences, path_to_dataset, return_track_ids=True):
classifier.train(X, y_true, y_inst)
print("Done!")
# Validation loop instance segmentation
print("Evaluating trained instance segmentation network on validation data...", end=" ", flush=True)
predictions = {}
for X, y_true, uuids, sequence_name, y_inst in validation_data_generator(validation_sequences, path_to_dataset,
return_track_ids=True):
if sequence_name not in predictions:
predictions[sequence_name] = {}
classifier._y_true_test = y_true # this is only used to set the internal data of our fake-classifier
classifier._y_inst_true = y_inst
y_pred_labelid, y_pred_instid = classifier.predict(X) # predict for each point in X a class label
for y_lid, y_tid, uid in zip(y_pred_labelid, y_pred_instid, uuids):
# store predictions in a dictionary along with the uuid of the points
predictions[sequence_name][uid] = [int(y_lid), int(y_tid)] # casting to int for JSON serialization
print("Done!")
# write instance segmentation results back to a file
current_dir = os.getcwd()
for sequence_name in predictions: # iterate over all unique sequences
name = os.path.splitext(sequence_name)[0]
output_name = os.path.join(current_dir,
name + "_inst_seg_predictions.json") # create output name for this sequence
print("Writing predictions for sequence {} to file {}.".format(sequence_name, output_name))
# write predictions to json file. This file can be loaded with the GUI tool to visualize the predictions
per_point_predictions_to_json(predictions[sequence_name], output_name, ClassificationLabel.translation_dict(),
schema=PredictionFileSchemas.InstSeg)
print("Done with instance segmentation!")
if __name__ == '__main__':
main()
|
147122
|
from __future__ import print_function
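# Caesar shift by n; with n == 13 (ROT13) the cipher is its own inverse,
# so the same function both encrypts and decrypts -- hence "dencrypt".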
def dencrypt(s, n):
out = ""
for c in s:
if c >= "A" and c <= "Z":
out += chr(ord("A") + (ord(c) - ord("A") + n) % 26)
elif c >= "a" and c <= "z":
out += chr(ord("a") + (ord(c) - ord("a") + n) % 26)
else:
out += c
return out
def main():
s0 = "HELLO"
s1 = dencrypt(s0, 13)
print(s1) # URYYB
s2 = dencrypt(s1, 13)
print(s2) # HELLO
if __name__ == "__main__":
main()
|
147152
|
import os
import pytest
import pygame
from tests.shared_fixtures import _init_pygame, default_ui_manager
from tests.shared_fixtures import default_display_surface, _display_surface_return_none
from tests.shared_comparators import compare_surfaces
from pygame_gui.ui_manager import UIManager
from pygame_gui.elements.ui_horizontal_scroll_bar import UIHorizontalScrollBar
from pygame_gui.core.ui_container import UIContainer
from pygame_gui.core.interfaces import IUIManagerInterface
try:
pygame.MOUSEWHEEL
except AttributeError:
pygame.MOUSEWHEEL = -1
class TestUIHorizontalScrollBar:
def test_creation(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
assert scroll_bar.image is not None
def test_rebuild(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
scroll_bar.rebuild()
assert scroll_bar.image is not None
def test_check_has_moved_recently(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
# move the scroll bar a bit
scroll_bar.right_button.held = True
scroll_bar.update(0.2)
assert scroll_bar.check_has_moved_recently() is True
def test_check_update_buttons(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
        # scroll right a bit then left again to exercise update
scroll_bar.right_button.held = True
scroll_bar.update(0.3)
scroll_bar.right_button.held = False
scroll_bar.left_button.held = True
scroll_bar.update(0.3)
assert scroll_bar.check_has_moved_recently() is True
def test_check_update_sliding_bar(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(0, 0, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
        # grab the sliding button with the mouse, then release it, to exercise update
default_ui_manager.mouse_position = (100, 15)
scroll_bar.sliding_button.held = True
scroll_bar.update(0.3)
assert scroll_bar.grabbed_slider is True
scroll_bar.sliding_button.held = False
scroll_bar.update(0.3)
assert scroll_bar.grabbed_slider is False
def test_redraw_scroll_bar(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
scroll_bar.redraw_scrollbar()
assert scroll_bar.sliding_button is not None
def test_reset_scroll_position(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
scroll_bar.reset_scroll_position()
assert scroll_bar.scroll_position == 0.0 and scroll_bar.start_percentage == 0.0
def test_set_visible_percentage(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
scroll_bar.start_percentage = 0.9
scroll_bar.set_visible_percentage(0.2)
assert scroll_bar.visible_percentage == 0.2
scroll_bar.set_visible_percentage(-0.2)
assert scroll_bar.visible_percentage == 0.0
scroll_bar.set_visible_percentage(1.9)
assert scroll_bar.visible_percentage == 1.0
def test_kill(self, _init_pygame, default_ui_manager: IUIManagerInterface,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
assert len(default_ui_manager.get_root_container().elements) == 2
assert len(default_ui_manager.get_sprite_group().sprites()) == 6
scroll_bar_sprites = [default_ui_manager.get_root_container(),
scroll_bar,
scroll_bar.button_container,
scroll_bar.left_button,
scroll_bar.right_button,
scroll_bar.sliding_button]
assert default_ui_manager.get_sprite_group().sprites() == scroll_bar_sprites
scroll_bar.kill()
assert len(default_ui_manager.get_root_container().elements) == 0
assert len(default_ui_manager.get_sprite_group().sprites()) == 1
empty_sprites = [default_ui_manager.get_root_container()]
assert default_ui_manager.get_sprite_group().sprites() == empty_sprites
def test_process_event(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.7,
manager=default_ui_manager)
scroll_bar.hovered = True
assert scroll_bar.process_event(pygame.event.Event(pygame.MOUSEWHEEL, {'x': 0.5})) is True
assert scroll_bar.process_event(pygame.event.Event(pygame.MOUSEWHEEL, {'x': -0.5})) is True
def test_rebuild_from_theme_data_non_default(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes",
"ui_horizontal_scroll_bar_non_default.json"))
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.1,
manager=manager)
assert scroll_bar.image is not None
def test_rebuild_from_theme_data_no_arrow_buttons(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes",
"ui_horizontal_scroll_bar_no_arrows.json"))
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=0.1,
manager=manager)
assert scroll_bar.left_button is None
assert scroll_bar.right_button is None
assert scroll_bar.image is not None
@pytest.mark.filterwarnings("ignore:Invalid value")
@pytest.mark.filterwarnings("ignore:Colour hex code")
def test_rebuild_from_theme_data_bad_values(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes",
"ui_horizontal_scroll_bar_bad_values.json"))
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
visible_percentage=1.0,
manager=manager)
assert scroll_bar.image is not None
def test_set_position(self, _init_pygame, default_ui_manager, _display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(80, 100, 200, 30),
visible_percentage=0.25, manager=default_ui_manager)
scroll_bar.set_position((200, 200))
# try to click on the scroll bar's left button
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (205, 215)}))
# if we successfully clicked on the moved scroll bar then this button should be True
assert scroll_bar.left_button.held is True
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (395, 215)}))
# if we successfully clicked on the moved scroll bar then this button should be True
assert scroll_bar.right_button.held is True
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (250, 215)}))
# if we successfully clicked on the moved scroll bar then this button should be True
assert scroll_bar.sliding_button.held is True
def test_set_relative_position(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
test_container = UIContainer(relative_rect=pygame.Rect(50, 50, 300, 250),
manager=default_ui_manager)
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(80, 100, 200, 30),
visible_percentage=0.25, manager=default_ui_manager,
container=test_container)
scroll_bar.set_relative_position((50, 50))
# try to click on the scroll bar's left button
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (105, 115)}))
# if we successfully clicked on the moved scroll bar then this button should be True
assert scroll_bar.left_button.held is True
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (295, 115)}))
# if we successfully clicked on the moved scroll bar then this button should be True
assert scroll_bar.right_button.held is True
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (150, 115)}))
# if we successfully clicked on the moved scroll bar then this button should be True
assert scroll_bar.sliding_button.held is True
def test_set_dimensions(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 0, 200, 30),
visible_percentage=0.25, manager=default_ui_manager)
scroll_bar.set_dimensions((100, 60))
        # try to click on the scroll bar's right button
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': (195, 40)}))
        # if we successfully clicked on the resized scroll bar then this button should be True
assert scroll_bar.right_button.held is True
def test_disable(self, _init_pygame: None, default_ui_manager: UIManager,
_display_surface_return_none: None):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(0, 0, 200, 30),
visible_percentage=0.25, manager=default_ui_manager)
scroll_bar.disable()
# process a mouse button down event
scroll_bar.right_button.process_event(
pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': scroll_bar.right_button.rect.center}))
scroll_bar.update(0.1)
# process a mouse button up event
scroll_bar.right_button.process_event(
pygame.event.Event(pygame.MOUSEBUTTONUP,
{'button': 1, 'pos': scroll_bar.right_button.rect.center}))
assert scroll_bar.scroll_position == 0.0 and scroll_bar.is_enabled is False
def test_enable(self, _init_pygame: None, default_ui_manager: UIManager,
_display_surface_return_none: None):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(0, 0, 200, 30),
visible_percentage=0.25, manager=default_ui_manager)
scroll_bar.disable()
scroll_bar.enable()
# process a mouse button down event
scroll_bar.right_button.process_event(
pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1, 'pos': scroll_bar.right_button.rect.center}))
scroll_bar.update(0.1)
# process a mouse button up event
scroll_bar.right_button.process_event(
pygame.event.Event(pygame.MOUSEBUTTONUP,
{'button': 1, 'pos': scroll_bar.right_button.rect.center}))
assert scroll_bar.scroll_position != 0.0 and scroll_bar.is_enabled is True
def test_show(self, _init_pygame, default_ui_manager, _display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 0, 200, 30),
visible_percentage=0.25, manager=default_ui_manager,
visible=0)
assert scroll_bar.visible == 0
assert scroll_bar.button_container.visible == 0
assert scroll_bar.sliding_button.visible == 0
assert scroll_bar.left_button.visible == 0
assert scroll_bar.right_button.visible == 0
scroll_bar.show()
assert scroll_bar.visible == 1
assert scroll_bar.button_container.visible == 1
assert scroll_bar.sliding_button.visible == 1
assert scroll_bar.left_button.visible == 1
assert scroll_bar.right_button.visible == 1
def test_hide(self, _init_pygame, default_ui_manager, _display_surface_return_none):
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 0, 200, 30),
visible_percentage=0.25, manager=default_ui_manager)
assert scroll_bar.visible == 1
assert scroll_bar.button_container.visible == 1
assert scroll_bar.sliding_button.visible == 1
assert scroll_bar.left_button.visible == 1
assert scroll_bar.right_button.visible == 1
scroll_bar.hide()
assert scroll_bar.visible == 0
assert scroll_bar.button_container.visible == 0
assert scroll_bar.sliding_button.visible == 0
assert scroll_bar.left_button.visible == 0
assert scroll_bar.right_button.visible == 0
def test_show_hide_rendering(self, _init_pygame, default_ui_manager, _display_surface_return_none):
resolution = (400, 400)
empty_surface = pygame.Surface(resolution)
empty_surface.fill(pygame.Color(0, 0, 0))
surface = empty_surface.copy()
manager = UIManager(resolution)
scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(25, 25, 375, 150),
visible_percentage=0.25,
manager=manager,
visible=0)
manager.update(0.01)
manager.draw_ui(surface)
assert compare_surfaces(empty_surface, surface)
surface.fill(pygame.Color(0, 0, 0))
scroll_bar.show()
manager.update(0.01)
manager.draw_ui(surface)
assert not compare_surfaces(empty_surface, surface)
surface.fill(pygame.Color(0, 0, 0))
scroll_bar.hide()
manager.update(0.01)
manager.draw_ui(surface)
assert compare_surfaces(empty_surface, surface)
|
147172
|
from collections import Counter, defaultdict
import csv
import json
import os
import random
import sys
from time import time
from metal.contrib.info_extraction.mentions import RelationMention
from metal.contrib.info_extraction.utils import mark_entities
import numpy as np
import torch
from scipy.sparse import issparse
from .explanation import Explanation
class PrintTimer:
"""Prints msg at start, total time taken at end."""
def __init__(self, msg, prefix="###"):
self.msg = msg
self.prefix = prefix + " " if len(prefix) > 0 else prefix
def __enter__(self):
self.t0 = time()
print("{0}{1}".format(self.prefix, self.msg))
def __exit__(self, type, value, traceback):
print ("{0}Done in {1:.1f}s.\n".format(self.prefix, time() - self.t0))
class ProgressBar(object):
def __init__(self, N, length=40):
# Protect against division by zero (N = 0 results in full bar being printed)
self.N = max(1, N)
self.nf = float(self.N)
self.length = length
# Precalculate the i values that should trigger a write operation
self.ticks = set([round(i/100.0 * N) for i in range(101)])
self.ticks.add(N-1)
self.bar(0)
def bar(self, i):
"""Assumes i ranges through [0, N-1]"""
if i in self.ticks:
b = int(np.ceil(((i+1) / self.nf) * self.length))
sys.stdout.write(
"\r[{0}{1}] {2}%".format(
"="*b, " "*(self.length-b), int(100*((i+1) / self.nf))))
sys.stdout.flush()
def close(self):
# Move the bar to 100% before closing
self.bar(self.N-1)
sys.stdout.write("\n\n")
sys.stdout.flush()
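# Hedged usage sketch for ProgressBar ('process' is a placeholder):
# pbar = ProgressBar(N=1000)
# for i in range(1000):
#     process(i)
#     pbar.bar(i)   # only writes at the precomputed tick values
# pbar.close()      # jumps to 100% and prints a trailing newline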
class ExplanationIO(object):
def write(self, explanations, fpath):
explanations = explanations if isinstance(explanations, list) else [explanations]
with open(fpath, 'w') as tsvfile:
tsvwriter = csv.writer(tsvfile, delimiter='\t')
for exp in explanations:
if isinstance(exp.candidate, str):
candidate_id = exp.candidate
else:
candidate_id = exp.candidate.mention_id
tsvwriter.writerow([
exp.name,
exp.label,
candidate_id,
exp.condition,
])
fpath = fpath if len(fpath) < 50 else fpath[:20] + '...' + fpath[-30:]
print("Wrote {} explanations to {}".format(len(explanations), fpath))
def read(self, fpath):
with open(fpath, 'r') as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter='\t')
num_read = 0
explanations = []
for (name, label, candidate_id, condition) in tsvreader:
explanations.append(
Explanation(
name=name,
label=int(label),
candidate=candidate_id,
condition=condition.strip(),
)
)
num_read += 1
fpath = fpath if len(fpath) < 50 else fpath[:20] + '...' + fpath[-30:]
print("Read {} explanations from {}".format(num_read, fpath))
return explanations
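# Hedged round-trip sketch (the candidate id and condition strings are made up):
# io = ExplanationIO()
# exp = Explanation(name="LF_example", label=1, candidate="doc1::span3",
#                   condition="'born' appears between X and Y")
# io.write(exp, "explanations.tsv")
# recovered = io.read("explanations.tsv")  # labels come back as ints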
def link_explanation_candidates(explanations, candidates):
"""Doc string goes here."""
target_candidate_ids = set()
linked = 0
print("Building list of target candidate ids...")
for e in explanations:
if e.candidate is not None and not isinstance(e.candidate, RelationMention):
target_candidate_ids.add(e.candidate)
elif e.candidate:
linked += 1
if linked == len(explanations):
print("All {} explanations are already linked to candidates.".format(
len(explanations)))
return explanations
else:
print("Collected {} unique target candidate ids from {} explanations.".format(
len(target_candidate_ids), len(explanations)))
if not target_candidate_ids:
print("No candidate hashes were provided. Skipping linking.")
return explanations
candidate_map = {}
print("Gathering desired candidates...")
for candidate in candidates:
if candidate.mention_id in target_candidate_ids:
candidate_map[candidate.mention_id] = candidate
if len(candidate_map) < len(target_candidate_ids):
num_missing = len(target_candidate_ids) - len(candidate_map)
print("Could not find {} target candidates with the following mention_ids (first 5):".format(
num_missing))
num_reported = 0
for i, c_hash in enumerate(target_candidate_ids):
if c_hash not in candidate_map:
print(c_hash)
num_reported += 1
if num_reported >= 5:
break
print("Found {}/{} desired candidates".format(
len(candidate_map), len(target_candidate_ids)))
print("Linking explanations to candidates...")
for e in explanations:
if not isinstance(e.candidate, RelationMention):
try:
e.candidate = candidate_map[e.candidate]
linked += 1
except KeyError:
pass
print("Linked {}/{} explanations".format(linked, len(explanations)))
return explanations
def sparse_to_indices(X):
"""Converts a sparse matrix into a tensor of the nonzero indices
Args:
X: an [n, num_features] one-hot scipy.sparse matrix
Returns:
        X_idx: an [n, h] tensor where X_idx[i,:] is a zero-padded 1D tensor of
the nonzero indices of X[i,:]
"""
if not issparse(X):
raise ValueError("X must be a scipy.sparse matrix")
nonzeros = X.nonzero()
indices = defaultdict(list)
for i, v in zip(nonzeros[0], nonzeros[1]):
indices[i].append(v + 1)
max_len = max(map(lambda x: len(x), indices.values()))
X_idx = torch.zeros(X.shape[0], max_len).long()
for i, values in indices.items():
X_idx[i, :len(values)] = torch.LongTensor(values)
return X_idx
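# Worked example for sparse_to_indices (assumes scipy and torch are available):
# from scipy.sparse import csr_matrix
# X = csr_matrix([[1, 0, 1],
#                 [0, 1, 0]])
# sparse_to_indices(X)
# # -> tensor([[1, 3],
# #            [2, 0]])   (column indices shifted by +1 and zero-padded per row)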
def display_candidate(candidate):
tokens = candidate.tokens
positions = list(zip(candidate.word_starts, candidate.word_ends))
markers = ['{', '}', '{', '}']
marked = mark_entities(tokens, positions, markers, style='concatenate')
print(' '.join(marked))
print()
print(marked)
class CandidateViewer(object):
def __init__(self, candidates, shuffle=False, seed=None):
if seed:
random.seed(seed)
self.candidates = candidates
self.idx = -1
self.order = list(range(len(candidates)))
# Shuffle indirectly to not mess up alignment between candidates and
# other objects in the workspace (e.g., labels).
if shuffle:
random.shuffle(self.order)
def view(self):
self.idx += 1
        if self.idx >= len(self.order):
print("Exhausted provided candidate set")
return
c = self.candidates[self.order[self.idx]]
display_candidate(c)
return c
|
147182
|
import FWCore.ParameterSet.Config as cms
# Define arbitrary tracker material groups
from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *
from SimTracker.TrackerMaterialAnalysis.trackingMaterialGroups_cff import *
# Analyze and plot the tracking material
from SimTracker.TrackerMaterialAnalysis.trackingMaterialAnalyser_cfi import *
|
147238
|
from astropy import units as u
from six import reraise
from six.moves import zip_longest
import sys
import os
import errno
import itertools
_quantity = u.Quantity
def mkdir_p(path):
""" mkdir -p equivalent [used by get_datafile]"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def united(qty, unit):
if isinstance(qty,u.Quantity):
return qty.to(unit)
else:
return qty*u.Unit(unit)
def uvalue(qty, unit):
return united(qty, unit).value
def get_datafile(species, savedir='./'):
"""
Load a molecular data file and save it into the specified directory
"""
from astroquery.lamda import Lamda
datapath = os.path.join(savedir,species)
species,suffix = os.path.splitext(species)
if suffix == "":
datapath += ".dat"
elif suffix != ".dat":
raise ValueError("Molecular data file must either be a species name or species.dat")
if not os.path.isdir(savedir):
mkdir_p(savedir)
if not os.path.isfile(datapath):
data = Lamda.query(species, return_datafile=True)
with open(datapath,'w') as out:
out.writelines([d+"\n" for d in data])
return os.path.split(datapath)
def get_colliders(fn):
"""
Get the list of colliders in a LAMDA data file
"""
from astroquery import lamda
collrates,radtrans,enlevs = lamda.core.parse_lamda_datafile(fn)
colliders = collrates.keys()
return colliders
def verify_collisionratefile(fn):
"""
Verify that a RADEX collisional rate file is valid to avoid a RADEX crash
"""
from astroquery import lamda
if not os.path.exists(fn):
raise IOError("File {0} does not exist.".format(fn))
for qt in lamda.core.query_types:
try:
collrates,radtrans,enlevs = lamda.core.parse_lamda_datafile(fn)
except Exception as ex:
reraise(type(ex), type(ex)("Data file verification failed. The molecular data file may be corrupt." +
"\nOriginal Error in the parser: " +
ex.args[0]),
sys.exc_info()[2])
if len(collrates) == 0:
raise ValueError("No data found in the table for the category %s" % qt)
class QuantityOff(object):
""" Context manager to disable quantities """
def __enter__(self):
self._quantity = u.Quantity
u.Quantity = lambda value,unit: value
def __exit__(self, type, value, traceback):
u.Quantity = self._quantity
class NoVerify(object):
""" Context manager to disable verification of molecule files """
def __enter__(self):
self._verify_collisionratefile = verify_collisionratefile
globals()['verify_collisionratefile'] = lambda x: True
def __exit__(self, type, value, traceback):
globals()['verify_collisionratefile'] = self._verify_collisionratefile
class ImmutableDict(dict):
def __setitem__(self, key, value):
raise AttributeError("Setting items for this dictionary is not supported.")
def unitless(x):
if hasattr(x, 'value'):
return x.value
else:
return x
# silly tool needed for fortran misrepresentation of strings
# http://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
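# Example: grouper("ABCDE", 2, fillvalue="x") yields ('A', 'B'), ('C', 'D'), ('E', 'x')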
def lower_keys(d):
""" copy dictionary with lower-case keys """
return {k.lower(): d[k] for k in d}
|
147243
|
from django.core.management.base import BaseCommand
from adminrestrict.models import AllowedIP
class Command(BaseCommand):
help = 'Remove an IP address from the Admin Allowed IP table'
def add_arguments(self, parser):
parser.add_argument('ip_address', type=str)
def handle(self, *args, **options):
ip_address = options['ip_address']
result = AllowedIP.objects.filter(ip_address=ip_address).delete()
num = result[0]
if num:
print('IP Address {0} has been removed from the allowed list'.format(ip_address))
else:
print('IP Address {0} was not found in allowed list'.format(ip_address))
|
147246
|
from .API_Elements import *
import json
class OAuth:
def __init__(self, bot, secret, redirect_uri, scope):
self.bot = bot
self.secret = secret
self.id = bot.get_self_user().id
self.redirect_uri = redirect_uri
self.scope = scope
def get_url(self):
"""
        Get the URL for authentication with Discord
"""
return f"https://discord.com/api/oauth2/authorize?client_id={self.id}&redirect_uri={self.redirect_uri}&response_type=code&scope={'%20'.join(self.scope.split())}"
def get_token(self, code):
"""
Get the token of the user
code:
            The code returned by the authentication flow
"""
data = {
"client_id": self.id,
"client_secret": self.secret,
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
"scope": self.scope
}
return self.bot.api("/oauth2/token","POST",data=data)
def __request_token(self,token,url):
headers = {
"Authorization": f"Bearer {json.loads(token)['access_token']}"
}
return self.bot.api(url,"GET",headers=headers)
def get_user(self,token):
"""
        Get a :class:`User` object representing the authenticated user
token:
The token of the user
"""
if "identify" in self.scope:
return User(self.__request_token(token,"/users/@me"),self.bot)
return "Invalid Scope"
def get_guilds(self,token):
"""
        Get a list of :class:`Guild` objects, the guilds the user is a member of
token:
The token of the user
"""
if "guilds" in self.scope:
return [Guild(guild,self.bot) for guild in self.__request_token(token,"/users/@me/guilds")]
return "Invalid Scope"
def add_guild_member(self, token, guild_id, user_id):
"""
        Add the authenticated user to a guild that the bot is in
        token:
            The token of the user
        guild_id:
            The id of the guild to add the user to
user_id:
The id of the user
"""
if "guilds.join" in self.scope:
return Member({**self.bot.api_call(f"/guilds/{guild_id}/members/{user_id}","PUT",json=json.loads(token)["access_token"]),"guild_id":guild_id})
return "Invalid Scope"
|
147270
|
def plug_in(symbol_values):
structure = symbol_values['structure']
factor = structure.composition.get_reduced_formula_and_factor()[1]
uc_cv = symbol_values.get("uc_cv")
uc_cp = symbol_values.get("uc_cp")
molar_cv = symbol_values.get("molar_cv")
molar_cp = symbol_values.get("molar_cp")
if uc_cv is not None:
return {"molar_cv": uc_cv / factor * 6.022E23}
if uc_cp is not None:
return {"molar_cp": uc_cp / factor * 6.022E23}
if molar_cv is not None:
return {"uc_cv": molar_cv * factor / 6.022E23}
if molar_cp is not None:
return {"uc_cp": molar_cp * factor / 6.022E23}
DESCRIPTION = """
Properties of a crystal structure, such as the number of sites in its
unit cell and its space group, as calculated by pymatgen.
"""
config = {
"name": "heat_capacity_unit_cell_conversion",
"connections": [
{
"inputs": [
"structure",
"uc_cv"
],
"outputs": [
"molar_cv",
]
},
{
"inputs": [
"structure",
"uc_cp"
],
"outputs": [
"molar_cp",
]
},
{
"inputs": [
"structure",
"molar_cv"
],
"outputs": [
"uc_cv",
]
},
{
"inputs": [
"structure",
"molar_cp"
],
"outputs": [
"uc_cp",
]
}
],
"categories": [],
"variable_symbol_map": {
"structure": "structure",
"uc_cv": "unit_cell_heat_capacity_constant_volume",
"uc_cp": "unit_cell_heat_capacity_constant_pressure",
"molar_cv": "molar_heat_capacity_constant_volume",
"molar_cp": "molar_heat_capacity_constant_pressure"
},
"units_for_evaluation": {
"uc_cv": "joule / kelvin",
"uc_cp": "joule / kelvin",
"molar_cv": "joule / kelvin / mol",
"molar_cp": "joule / kelvin / mol"
},
"description": DESCRIPTION,
"references": ["doi:10.1016/j.commatsci.2012.10.028"],
"implemented_by": [
"dmrdjenovich"
],
"plug_in": plug_in
}
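# Illustrative numbers only: for a structure whose reduced-formula factor is 4,
# a unit-cell heat capacity uc_cv = 2.0e-22 J/K maps to
# molar_cv = 2.0e-22 / 4 * 6.022e23 ~= 30.1 J/(K mol); the reverse connections
# multiply by the factor and divide by Avogadro's number instead.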
|
147320
|
import os
import pytest
from shapely.geometry import Point
import trackintel as ti
@pytest.fixture
def testdata_tpls():
"""Read triplegs test data from files."""
pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife"))
pfs, sp = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
_, tpls = pfs.as_positionfixes.generate_triplegs(sp, method="between_staypoints")
return tpls
class TestTriplegs:
"""Tests for the TriplegsAccessor."""
def test_accessor_column(self, testdata_tpls):
"""Test if the as_triplegs accessor checks the required column for triplegs."""
tpls = testdata_tpls.copy()
assert tpls.as_triplegs
# check user_id
with pytest.raises(AttributeError):
tpls.drop(["user_id"], axis=1).as_triplegs
def test_accessor_geometry(self, testdata_tpls):
"""Test if the as_triplegs accessor requires geometry column."""
tpls = testdata_tpls.copy()
# check geometry
with pytest.raises(AttributeError, match="No geometry data set yet"):
tpls.drop(["geom"], axis=1).as_triplegs
def test_accessor_geometry_type(self, testdata_tpls):
"""Test if the as_triplegs accessor requires LineString geometry."""
tpls = testdata_tpls.copy()
# check geometry type
with pytest.raises(AttributeError, match="The geometry must be a LineString"):
tpls["geom"] = Point([(13.476808430, 48.573711823)])
tpls.as_triplegs
|
147333
|
import re
if __name__ == '__main__':
n = int(input())
    r = r'^[Hh][Ii]\s[^Dd]'  # character classes should not contain a literal '|'
for _ in range(n):
s = input()
if re.match(r, s):
print(s)
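# Example: with n=2 and the input lines "Hi Alex" and "Hi Dexter", only "Hi Alex"
# is printed, since the pattern requires "hi"/"Hi", a space, then anything but D/d.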
|
147369
|
class KikErrorException(Exception):
def __init__(self, xml_error, message=None):
self.message = message
self.xml_error = xml_error
def __str__(self):
return self.__repr__()
def __repr__(self):
if self.message is not None:
return self.message
else:
if "prettify" in dict(self.xml_error):
error_string = self.xml_error.prettify()
else:
error_string = self.xml_error
return "Kik error: \r\n" + error_string
class KikCaptchaException(KikErrorException):
def __init__(self, xml_error, message, captcha_url):
super().__init__(xml_error, message)
self.captcha_url = captcha_url
class KikLoginException(KikErrorException):
pass
class KikInvalidAckException(KikErrorException):
pass
class KikEmptyResponseException(KikErrorException):
pass
class KikApiException(Exception):
pass
class KikParsingException(Exception):
pass
class KikUploadError(Exception):
def __init__(self, status_code, reason=None):
        self.status_code = status_code
self.reason = reason
def __str__(self):
return self.__repr__()
def __repr__(self):
if self.reason is None:
return self.status_code
return f"[{self.status_code}] {self.reason}"
|
147372
|
from pandas import read_table, concat
from config import DATA_DIR
from data_sources.data_source import DataSource
from data_sources.sra import SRAExpressionLayer
from layers import ExpressionLayer
class Recount2(DataSource):
def __init__(self, meta_sra, *args, **kwargs):
# if not meta_sra:
super().__init__(*args, **kwargs)
self.meta_sra = meta_sra
def expression(self, studies):
expressions = {}
for study in studies:
expressions[study] = read_table(
DATA_DIR + f'/recount2/{study}.tsv.gz'
).set_index('gene_id')
df = concat(
expressions.values(),
axis='columns'
)
layer = SRAExpressionLayer(df, run_to_study={
run: study
for run in df.columns
})
return self.with_disease_info(layer)
def with_disease_info(self, layer):
runs = layer.columns
diseases = {
run: self.meta_sra.run_to_disease(run)
for run in runs
}
layer.run_to_disease = diseases
return layer
supported_layers = {
ExpressionLayer: expression
}
|
147376
|
from office365.entity import Entity
from office365.entity_collection import EntityCollection
from office365.planner.plans.plan import PlannerPlan
from office365.planner.tasks.task import PlannerTask
from office365.runtime.paths.resource_path import ResourcePath
class PlannerUser(Entity):
"""The plannerUser resource provide access to Planner resources for a user.
It doesn't contain any usable properties."""
@property
def plans(self):
"""Read-only. Nullable. Returns the plannerTasks assigned to the user.
:rtype: EntityCollection
"""
return self.get_property('plans',
EntityCollection(self.context, PlannerPlan,
ResourcePath("plans", self.resource_path)))
@property
def tasks(self):
"""Read-only. Nullable. Returns the plannerTasks assigned to the user.
:rtype: EntityCollection
"""
return self.get_property('tasks',
EntityCollection(self.context, PlannerTask,
ResourcePath("tasks", self.resource_path)))
|
147432
|
def anonymous_allowed(fn):
fn.authenticated = False
return fn
def authentication_required(fn):
fn.authenticated = True
return fn
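# Minimal usage sketch: the decorators only set an 'authenticated' flag on the
# wrapped function; the dispatcher that reads the flag is not shown here.
# @anonymous_allowed
# def health_check(request):
#     return "ok"
# # health_check.authenticated -> False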
|
147433
|
from deidentify.surrogates.generators import AgeSurrogates
def test_replace_one():
annotations = [
'88 jarige',
'24/25e',
'2',
'60',
'patient is 92',
'91 age',
'90 jaar oud',
'89 years',
'101'
]
age_surrogates = AgeSurrogates(annotations=annotations)
annotations_replaced = age_surrogates.replace_all()
assert annotations_replaced == [
'88 jarige',
'24/25e',
'2',
'60',
'patient is 89',
'89 age',
'89 jaar oud',
'89 years',
'89'
]
|
147441
|
import abc
from lbann import optimizers_pb2
import lbann.core.util
class Optimizer(abc.ABC):
"""Optimization algorithm for a neural network's parameters."""
def export_proto(self):
"""Construct and return a protobuf message."""
return optimizers_pb2.Optimizer()
# Generate Optimizer sub-classes from lbann.proto
# Note: The list of skip fields must be updated if any new fields are
# added to the Optimizer message in lbann.proto
if optimizers_pb2:
classes = lbann.core.util.generate_classes_from_protobuf_message(
optimizers_pb2.Optimizer,
base_class = Optimizer,
base_has_export_proto = True)
for c in classes:
globals()[c.__name__] = c
|
147454
|
import algoneer.dataset
from typing import Optional, Iterable, Any
from ..dataschema import AttributeSchema
import abc
class Attribute(abc.ABC):
def __init__(
self, dataset: "algoneer.dataset.Dataset", column: str, schema: AttributeSchema
) -> None:
self._schema = schema
self._column = column
self._dataset = dataset
def __getattr__(self, attr):
if attr.startswith("is_"):
_type = attr[3:]
if self.schema is not None and self.schema.type.name.lower() == _type:
return True
return False
raise AttributeError("not found")
@property
def column(self) -> str:
return self._column
@column.setter
def column(self, column: str) -> None:
self._column = column
@property
def roles(self) -> Iterable[str]:
return self._schema.roles
@property
def schema(self) -> AttributeSchema:
return self._schema
@schema.setter
def schema(self, schema: AttributeSchema) -> None:
self._schema = schema
@property
def dataset(self):
return self._dataset
@abc.abstractmethod
def __len__(self) -> int:
pass
@abc.abstractmethod
def sum(self) -> float:
pass
@abc.abstractmethod
def mean(self) -> float:
pass
@abc.abstractmethod
def min(self) -> float:
pass
@abc.abstractmethod
def max(self) -> float:
pass
@abc.abstractmethod
def __getitem__(self, item) -> Any:
pass
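# Behaviour note for the dynamic "is_*" lookup above: for a concrete subclass
# whose schema type name is "numeric", attr.is_numeric evaluates to True and
# e.g. attr.is_categorical to False; the available type names depend entirely on
# AttributeSchema and are only illustrative here.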
|
147491
|
import sys
import pytoulbar2
Lines = open(sys.argv[1], 'r').readlines()
N = len(Lines)
Matrix = [[int(e) for e in l.split(' ')] for l in Lines]
Top = 1 + N*N
K = int(sys.argv[2])
Var = [(chr(65 + i) if N < 28 else "x" + str(i)) for i in range(N)] # Political actor or any instance
# Var = ["ron","tom","frank","boyd","tim","john","jeff","jay","sandy","jerry","darrin","ben","arnie"] # Transatlantic
# Var = ["justin","harry","whit","brian","paul","ian","mike","jim","dan","ray","cliff","mason","roy"] # Sharpstone
# Var = ["Sherrif","CivilDef","Coroner","Attorney","HighwayP","ParksRes","GameFish","KansasDOT","ArmyCorps","ArmyReserve","CrableAmb","FrankCoAmb","LeeRescue","Shawney","BurlPolice","LyndPolice","RedCross","TopekaFD","CarbFD","TopekaRBW"] # Kansas
Problem = pytoulbar2.CFN(Top)
for u in range(K):
for v in range(K):
Problem.AddVariable("M_" + str(u) + "_" + str(v), range(2))
for i in range(N):
Problem.AddVariable(Var[i], range(K))
for u in range(K):
for v in range(K):
for i in range(N):
for j in range(N):
if i != j:
Problem.AddFunction(["M_" + str(u) + "_" + str(v), Var[i], Var[j]],
[1 if (u == k and v == l and Matrix[i][j] != m) else 0
for m in range(2) for k in range(K) for l in range(K)])
# self-loops
for u in range(K):
for i in range(N):
Problem.AddFunction(["M_" + str(u) + "_" + str(u), Var[i]],
[1 if (u == k and Matrix[i][i] != m) else 0
for m in range(2) for k in range(K)])
# breaking partial symmetries by fixing first (K-1) domain variables to be assigned to cluster less than or equal to their index
for l in range(K-1):
Problem.AddFunction([Var[l]], [Top if k > l else 0 for k in range(K)])
Problem.Dump(sys.argv[1].replace('.mat','.cfn'))
Problem.Solve()
|
147511
|
import datetime
from os import startfile, system
from pathlib import Path
from random import randint
from sys import exit as sysend
class Functions:
def __init__(self):
self.log_prefix = self.log_dated_names()
self.home_path = Path.home()
self.taskymain_path = self.home_path / "Tasky"
self.tasks_path = self.taskymain_path / "tasks.txt"
self.taskylog_path = self.taskymain_path / "taskylogs"
        self.cookie_folder_path = self.taskylog_path / "cookie"
    def check_tasky_folders(self):
        self.taskylog_path.mkdir(parents=True, exist_ok=True)
    def cookie_dir(self):
        if not self.cookie_folder_path.is_dir():
return False, self.cookie_folder_path, 0
if not (self.cookie_folder_path / "cookies.txt").is_file():
return True, self.cookie_folder_path, 0
with open(self.cookie_folder_path / "cookies.txt", "r") as cookiefile:
count = cookiefile.readlines()
while "\n" in count:
count.remove("\n")
for i in range(len(count)):
count[i] = count[i].replace("\n", "")
if len(count) != 1 or not count[0].isdecimal():
return True, self.cookie_folder_path, 0
if 0 <= int(count[0]) <= 15:
return True, self.cookie_folder_path, int(count[0])
elif int(count[0]) > 15:
return True, self.cookie_folder_path, 15
else:
return True, self.cookie_folder_path, 0
@staticmethod
def log_dated_names():
t = datetime.datetime.now()
a = str(t)[:-10]
a = a.replace("-", "_")
a = a.replace(":", "")
a = a.replace(" ", "__")
return str(a)
def check_tasky_log(self):
try:
log_file = open(self.taskylog_path / f"{self.log_prefix}.log", "r")
log_file.close()
except FileNotFoundError:
log_file = open(self.taskylog_path / f"{self.log_prefix}.log", "w")
log_file.close()
def log(self, data):
with open(self.taskylog_path / f"{self.log_prefix}.log", "a") as file:
current_dt = str(datetime.datetime.now())[:-4]
file.write(f"{current_dt} >> {str(data)}" + "\n")
def check_tasks_txt(self):
try:
self.log("[INFO] attempted to open tasks.txt in read mode")
with open(self.tasks_path, "r") as b:
self.log("[INFO] attempt successful")
except FileNotFoundError:
self.log("[ERROR] attempt failed, can't find tasks.txt")
with open(self.tasks_path, "w") as b:
self.log("[INFO] created empty text file 'tasks.txt'")
def make_dicts(self):
months = {
"01": 31,
"02": 29,
"03": 31,
"04": 30,
"05": 31,
"06": 30,
"07": 31,
"08": 31,
"09": 30,
"10": 31,
"11": 30,
"12": 31,
}
self.log("[INFO] defined dict 1 (months)")
month_names = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12,
}
self.log("[INFO] defined dict 2 (month_names)")
return months, month_names
def clear(self):
self.log("[FUNCTION] starts -> clear()")
system("cls")
self.log("[INFO] output screen cleared")
self.log("[FUNCTION] ends -> clear()")
def info_bar(self, data, monthsdict):
data = str(data)
self.clear()
self.status(monthsdict)
print(f"<< {data.center(40)} >>" + "\n")
def timediff(self, tt, monthsdict):
self.log("[FUNCTION] starts -> timediff()")
tt = tt.split(":")
self.log(f"[INFO] split variable named 'tt' {tt} into 5 parts")
# time now
tn = datetime.datetime.now()
self.log("[INFO] calculated current date-time as variables")
tny = tn.strftime("%y")
self.log(f"[INFO] year: {tny}")
tnm = tn.strftime("%m")
self.log(f"[INFO] month: {tnm}")
tnd = tn.strftime("%d")
self.log(f"[INFO] date: {tnd}")
tnh = tn.strftime("%H")
self.log(f"[INFO] hours: {tnh}")
tnmin = tn.strftime("%M")
self.log(f"[INFO] min: {tnmin}")
# task time
tty = tt[0]
ttm = tt[1]
ttd = tt[2]
tth = tt[3]
ttmin = tt[4]
self.log("[INFO] stored 5 parts of var 'tt'")
self.log(
f"[INFO] year: {tty}, month: {ttm}, date: {ttd}, hours: {tth}, mins: {ttmin}"
)
diffy = int(tty) - int(tny)
diffm = int(ttm) - int(tnm)
diffd = int(ttd) - int(tnd)
diffh = int(tth) - int(tnh)
diffmin = int(ttmin) - int(tnmin)
self.log(
"[INFO] calculated differences between corresponding values of 'tt' and 'tn'"
)
if diffmin < 0:
diffmin = 60 + diffmin
diffh -= 1
if diffh < 0:
diffh = 24 + diffh
diffd -= 1
if diffd < 0:
diffd = monthsdict.get(str(tnm)) + diffd
if int(tnm) == 2 and int(tny) % 4 != 0:
diffd -= 1
diffm -= 1
if diffm < 0:
diffm = 12 + diffm
diffy -= 1
self.log("[INFO] adjusted negative differences 'diff'")
if diffy < 0:
output = "Task Expired".rjust(19)
else:
diffy = str(diffy)
diffm = str(diffm)
diffd = str(diffd)
diffh = str(diffh)
diffmin = str(diffmin)
self.log("[INFO] converted 'difference' numbers to strings")
if int(diffy) >= 1:
output = (
f"{diffy}y".rjust(3)
+ f"{diffm}M".rjust(4)
+ f"{diffd}d".rjust(4)
+ f"{diffh}h".rjust(4)
+ f"{diffmin}m".rjust(4)
)
elif int(diffm) >= 1:
output = (
f"{diffm}M".rjust(4 + 3)
+ f"{diffd}d".rjust(4)
+ f"{diffh}h".rjust(4)
+ f"{diffmin}m".rjust(4)
)
elif int(diffd) >= 1:
output = (
f"{diffd}d".rjust(4 + 7)
+ f"{diffh}h".rjust(4)
+ f"{diffmin}m".rjust(4)
)
elif int(diffh) >= 1:
output = f"{diffh}h".rjust(4 + 11) + f"{diffmin}m".rjust(4)
            elif int(diffmin) >= 30:
output = f"{diffmin}m".rjust(4 + 15)
else:
output = f"LESS THAN {diffmin} MIN".rjust(19)
self.log("[INFO] calculated time remaining for output")
self.log(f"[INFO] {output}")
self.log("[INFO] returned output")
self.log("[FUNCTION] ends -> timediff()")
return output
def read_and_sort_tasks_file(
self,
): # returns the current data sorted and separately in list
self.log("[FUNCTION] starts -> read_and_sort_tasks_file()")
with open(self.tasks_path, "r") as a:
self.log("[INFO] opened 'tasks.txt' in read mode")
x = a.readlines()
self.log("[INFO] stored every raw line of 'tasks.txt' in a list called 'x'")
self.log(x)
y = []
while "\n" in x:
x.remove("\n")
self.log("[INFO] removed newline characters from 'x'")
self.log(x)
for item in x:
item = item.replace("\n", "")
y += [item]
self.log("[INFO] removed newline characters from every item of 'x'")
self.log(y)
tasklist = self.sort_tasks(y)
self.log("[INFO] returned sorted list = tasklist")
self.log("[FUNCTION] ends -> read_and_sort_tasks_file()")
return tasklist
def sort_tasks(self, tlist):
self.log("[FUNCTION] starts -> sort_tasks(tlist, tdir)")
nums = []
self.log("[INFO] created empty list nums")
temp_dict = {}
self.log("[INFO] created empty dictionary temp_dict")
for task in tlist:
rawtime = task[:14]
rawtime = rawtime.replace(":", "")
nums += [int(rawtime)]
temp_dict[tlist.index(task)] = int(rawtime)
self.log(f"[INFO] nums = {nums}")
self.log(f"[INFO] temp_dict = {temp_dict}")
nums.sort()
self.log(f"[INFO] sorted nums list = {nums}")
for k, v in temp_dict.items():
nums[nums.index(v)] = tlist[k]
self.log(
"[INFO] replaced respective numbers with tasks of same index as nums' initial index"
)
sorted_output = "\n".join(nums)
with open(self.tasks_path, "w") as taskfile:
taskfile.write(sorted_output)
self.log("[INFO] sorted output written to tasks.txt")
self.log(f"[INFO] returned nums = {nums}")
self.log("[FUNCTION] ends -> sort_tasks()")
return nums
def status(self, monthsdict):
self.log("[FUNCTION] starts -> status()")
self.log("|||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("\n~~~~~~~~ TASKS REMAINING ~~~~~~~~\n")
task_list = self.read_and_sort_tasks_file()
self.log("[INFO] stored returned 'y' as 'task_list'")
outputs = []
self.log("[INFO] started iterating through 'task_list'")
for taskline in task_list:
taskparts = taskline.split("=")
self.log(f"[INFO] working with task number {task_list.index(taskline) + 1}")
rawtasktime = self.timediff(taskparts[0], monthsdict)
self.log(f"[INFO] rawtasktime: {rawtasktime}")
rawtaskinfo = taskparts[1]
self.log(f"[INFO] rawtaskinfo: {rawtaskinfo}")
taskoutput = [
f"({task_list.index(taskline) + 1}) {rawtasktime} >>> {rawtaskinfo.title()}"
]
outputs += taskoutput
self.log(
"[INFO] stored each task details separately as (task number) (task time remaining) >>> (task name)"
)
self.log(outputs)
status_output = "\n".join(outputs)
print(status_output + "\n")
self.log("[INFO] all task details printed on output screen")
self.log("|||||||||||||||||||||||||||||||||||||||||||||||||||||||")
self.log("[FUNCTION] ends -> status()")
def remove(self, num, monthsdict):
self.log(f"[FUNCTION] starts -> remove({num})")
last = self.read_and_sort_tasks_file()
self.log("[INFO] stored returned 'y' as 'last'")
self.log(f"[INFO] task {num} requested to be removed ")
rem_index = int(num) - 1
rem_task = last[rem_index]
last.remove(rem_task)
self.log(f"[INFO] removed requested task [{rem_task}] from the list")
self.log(last)
new_output = "\n".join(last)
with open(self.tasks_path, "w") as taskfile:
self.log("[INFO] opened 'tasks.txt' in write mode")
taskfile.write(new_output)
self.log("[INFO] wrote new output to 'tasks.txt'")
self.log(f"[FUNCTION] ends -> remove({num})")
self.info_bar(f"removed task {num} from the list", monthsdict)
def edit_task(self, num, monthsdict, monthnamesdict):
self.log(f"[FUNCTION] starts -> edit_task({num})")
last = self.read_and_sort_tasks_file()
self.log("[INFO] stored returned 'y' as 'last'")
task_ind = int(num) - 1
target_task = last[task_ind]
self.log(f"[INFO] task number {num} requested for edit")
ttask_time, ttask_name = target_task.split("=")
self.log("[INFO] stored values of task to be edited as ttask_time and ttask_name")
self.log(f"[INFO] original values: {ttask_time} and {ttask_name}")
edit_task_help = f"\nWhat needs to be edited in task {num}? (Enter corresponding number)\n1. Date-Time\n2. Task Description\n3. Both\n4. Exit EDIT MODE\n"
self.info_bar(f"edit mode for task {num}", monthsdict)
print(edit_task_help)
while True:
try:
edited = False
exited = False
self.log("[WAITING] FOR 'choice' INPUT")
choice = int(input("> "))
self.log(f"[INFO] received 'choice': {choice}")
if choice == 1:
self.log("[INFO] user input 1 to edit date-time only")
self.info_bar(f"task {num} edit: type 'cancel' to cancel", monthsdict)
mn, hr, dt, mth, yr = self.new_task_time(monthsdict, monthnamesdict)
if (mn, hr, dt, mth, yr) != (0, 0, 0, 0, 0):
ttask_time = f"{yr}:{mth}:{dt}:{hr}:{mn}"
self.log("[INFO] updated task details saved")
edited = True
else:
self.info_bar(f"edit mode for task {num}", monthsdict)
print(edit_task_help)
elif choice == 2:
self.log("[INFO] user input 2 to edit name only")
self.info_bar(f"task {num} edit: type 'cancel' to cancel", monthsdict)
ttask_name = self.new_task_name()
if ttask_name != "cancel":
self.log("[INFO] updated task details saved")
edited = True
else:
self.info_bar(f"edit mode for task {num}", monthsdict)
print(edit_task_help)
elif choice == 3:
self.log("[INFO] user input 3 to edit both task name and date-time")
self.info_bar(f"task {num} edit: type 'cancel' to cancel", monthsdict)
ttask_name = self.new_task_name()
if ttask_name != "cancel":
self.log("[INFO] updated task details saved")
mn, hr, dt, mth, yr = self.new_task_time(
monthsdict, monthnamesdict
)
if (mn, hr, dt, mth, yr) != (0, 0, 0, 0, 0):
ttask_time = f"{yr}:{mth}:{dt}:{hr}:{mn}"
self.log("[INFO] updated task details saved")
edited = True
else:
self.info_bar(f"edit mode for task {num}", monthsdict)
print(edit_task_help)
else:
self.info_bar(f"edit mode for task {num}", monthsdict)
print(edit_task_help)
elif choice == 4:
self.log(
f"[INFO] user input 4 to exit edit-mode for task number {num}"
)
exited = True
else:
self.log(f"[ERROR] invalid value entered in edit mode: {choice}")
self.log("[INFO] allowed values = 1, 2, 3, 4")
self.info_bar("choose out of 1, 2, 3, 4 only", monthsdict)
print(edit_task_help)
if edited:
edited_task = f"{ttask_time}={ttask_name}"
self.log(f"[INFO] old task: {last[task_ind]}")
last[task_ind] = edited_task
self.log("[INFO] replaced old task in 'last' with edited task")
self.log(f"[INFO] new task: {edited_task}")
with open(self.tasks_path, "w") as taskfile:
new_output = "\n".join(last)
taskfile.write(new_output)
self.log("[INFO] updated output written to 'tasks.txt'")
self.log(last)
self.log(
"[INFO] refreshing output screen with updated values of tasks"
)
self.info_bar("requested edit successful", monthsdict)
print(edit_task_help)
self.log("[INFO] refreshed")
if exited:
self.log(f"[INFO] exiting edit mode for task {num}")
self.info_bar(f"exited edit mode for task {num}", monthsdict)
break
except ValueError:
self.log(
"[ERROR] user typed weird shit instead of numbers... it wasn't very effective"
)
self.info_bar("numbers 1, 2, 3, 4 allowed only", monthsdict)
print(edit_task_help)
self.log("[INFO] edited name/date-time of requested task")
self.log(f"[FUNCTION] ends -> edit_task({num})")
def new_task_name(self):
self.log("[FUNCTION] starts -> new_task_name()")
while True:
self.log("[WAITING] for task description input")
taskinfo = input("New Task description (50 chars): ").strip()
self.log(f"[INFO] task description input: {taskinfo}")
if taskinfo == "cancel":
self.log("[INFO] user chose to cancel new task addition")
self.log("[FUNCTION] ends -> new_task_name()")
return taskinfo
elif taskinfo != "":
if len(taskinfo) <= 50:
if "=" not in taskinfo:
self.log("[INFO] stored input from user as task description")
self.log(f"[INFO] new task name: {taskinfo}")
self.log("[INFO] returned 'taskinfo'")
self.log("[FUNCTION] ends -> new_task_name()")
return taskinfo
else:
print("Task description cannot contain symbol =\n")
self.log("[ERROR] task description contains symbol =")
else:
print("Task description cannot be more than 50 characters\n")
self.log("[ERROR] task description is more than 50 characters")
else:
print("Task description cannot be empty\n")
self.log("[ERROR] task description cannot be empty")
def new_task_time(self, monthsdict, monthnamesdict):
self.log("[FUNCTION] starts -> new_task_time()")
while True:
while True: # ask for date
self.log("[WAITING] for date input")
tdate = input("Date: ").strip()
self.log(f"[INFO] date input: {tdate}")
if tdate.lower() == "cancel":
self.log("[INFO] user chose to cancel new task addition")
self.log("[FUNCTION] ends -> new_task_time()")
return 0, 0, 0, 0, 0
elif tdate.isdecimal() and (int(tdate) in range(1, 32)):
self.log(f"[INFO] date number valid")
if len(tdate) > 2:
tdate = tdate[-2:]
tdate = tdate.zfill(2)
self.log(
"[INFO] converted (if any) single digit date to double digit"
)
self.log(f"[INFO] {tdate}")
break
else:
self.log(
f"[ERROR] user doesn't know dates naturally go from 1 to 31, wrote: {tdate}"
)
print("Invalid date entered\n")
while True: # ask for month
self.log("[WAITING] for month input (num/words)")
tmonth = input("Month (number/name): ").lower().strip()
self.log(f"[INFO] month input: {tmonth}")
twordmonth = None
if tmonth == "cancel":
self.log("[INFO] user chose to cancel new task addition")
self.log("[FUNCTION] ends -> new_task_time()")
return 0, 0, 0, 0, 0
elif tmonth.isalpha():
self.log("[INFO] input is alphabetic")
for k, v in monthnamesdict.items():
self.log(f"[INFO] checking table month_names item = {k}: {v}")
if tmonth in k:
self.log(f"[INFO] {tmonth} in {k} = True")
twordmonth = k
tmonth = str(v).zfill(2)
self.log(
f"[INFO] corresponding number to the month {k} = {tmonth}"
)
break
self.log(f"[INFO] {tmonth} in {k} = False")
if tmonth.isdecimal():
self.log(
f"[INFO] tmonth modified to a number successfully: {tmonth}"
)
break
else:
self.log(f"[ERROR] seriously, what month is this: {tmonth}")
print("Invalid month entered\n")
elif str(tmonth).isdecimal() and (int(tmonth) in range(1, 13)):
self.log("[INFO] converting month number to a 2 digit number")
if len(tmonth) > 2:
tmonth = tmonth[-2:]
month_names_values = monthnamesdict.values()
tpos = list(month_names_values).index(int(tmonth))
twordmonth = list(monthnamesdict.keys())[tpos]
tmonth = tmonth.zfill(2)
self.log(f"[INFO] {tmonth}")
break
else:
self.log(
f"[ERROR] something wrong with the month entered by the user: {tmonth}"
)
print("Invalid month entered\n")
# check if this date exists in this month
if int(tdate) > monthsdict[tmonth]:
self.log(
f"[ERROR] umm, month {tmonth} a.k.a {twordmonth} doesn't have {tdate} days..."
)
print("Invalid date entered for the given month\n")
else:
self.log("[INFO] confirmed valid date for given month")
valid_date = True
special_feb_case = False
# special Feb 29 case
if int(tmonth) == 2 and int(tdate) == 29:
special_feb_case = True
valid_date = False
self.log(
"[INFO] user has entered the date 29 for the month 02 (February), year yet to be checked"
)
while True: # ask for year
self.log("[WAITING] for year input")
tyear = input("Year (yyyy) (2000-99): ").strip()
self.log(f"[INFO] year input: {tyear}")
if tyear.lower() == "cancel":
self.log("[INFO] user chose to cancel new task addition")
self.log("[FUNCTION] ends -> new_task_time()")
return 0, 0, 0, 0, 0
elif tyear.isdecimal() and (int(tyear) in range(2000, 2100)):
self.log(f"[INFO] confirmed year lies between 2000 and 2100")
if special_feb_case and int(tyear) % 4 == 0:
self.log(
f"[INFO] entered year is confirmed leap year: {tyear}"
)
valid_date = True
tyear = tyear[-2:]
self.log(
f"[INFO] last 2 digits of input year stored: {tyear}"
)
break
elif special_feb_case and int(tyear) % 4 != 0:
self.log(
"[ERROR] entered year is not a leap year while date-month given by user is 29 Feb"
)
print("Non-Leap Year cannot have Feb 29\n")
break
else:
tyear = tyear[-2:]
self.log(
f"[INFO] last 2 digits of input year stored: {tyear}"
)
break
else:
self.log(f"[ERROR] invalid year received: {tyear}")
print("Invalid Year entered\n")
if valid_date:
break
while True: # ask for hours
self.log("[WAITING] for hours input")
thour = input("Hours (24h format): ").strip()
self.log(f"[INFO] received hour input: {thour}")
if thour.lower() == "cancel":
self.log("[INFO] user chose to cancel new task addition")
self.log("[FUNCTION] ends -> new_task_time()")
return 0, 0, 0, 0, 0
elif thour.isdecimal() and (int(thour) in range(24)) and thour != "":
self.log("[INFO] confirmed valid input for hours")
if len(thour) > 2:
thour = thour[-2:]
thour = thour.zfill(2)
self.log(f"[INFO] stored hours: {thour}")
break
else:
self.log(
f"[ERROR] Earth doesn't have these amount of hours in a day: {thour}"
)
print("Invalid hours entered\n")
while True: # ask for minutes
self.log("[WAITING] for minutes input")
tmin = input("Minutes: ").strip()
self.log(f"[INFO] minute input received: {tmin}")
if tmin == "cancel":
self.log("[INFO] user chose to cancel new task addition")
self.log("[FUNCTION] ends -> new_task_time()")
return 0, 0, 0, 0, 0
elif tmin.isdecimal() and (int(tmin) in range(60)) and tmin != "":
self.log("[INFO] confirmed valid input for minutes")
if len(tmin) > 2:
tmin = tmin[-2:]
tmin = tmin.zfill(2)
self.log(f"[INFO] stored mins: {tmin}")
break
else:
self.log(f"[ERROR] invalid minutes value entered: {tmin}")
print("Invalid minutes entered\n")
self.log(f"[INFO] 5 values returned: {tmin}, {thour}, {tdate}, {tmonth}, {tyear}")
self.log("[FUNCTION] ends -> new_task_time()")
return tmin, thour, tdate, tmonth, tyear
def new_task(self, monthsdict, monthnamesdict):
self.log("[FUNCTION] starts -> new_task()")
self.log("[INFO] calling related functions...")
taskinfo = self.new_task_name()
if taskinfo == "cancel":
self.info_bar("task addition cancelled", monthsdict)
else:
tmin, thour, tdate, tmonth, tyear = self.new_task_time(
monthsdict, monthnamesdict
)
if (tmin, thour, tdate, tmonth, tyear) == (0, 0, 0, 0, 0):
self.info_bar("task addition cancelled", monthsdict)
else:
taskcell = f"{tyear}:{tmonth}:{tdate}:{thour}:{tmin}={taskinfo}"
self.log("[INFO] combined values of new_task_name() and new_task_time()")
self.log(f"[INFO] {taskcell}")
self.log("[INFO] calling function add(new)")
self.add(taskcell)
self.info_bar("new task added", monthsdict)
self.log("[FUNCTION] ends -> new_task()")
def add(self, new):
self.log("[FUNCTION] starts -> add(new)")
self.log(f"[INFO] appending [{new}] to 'tasks.txt'")
taskfile = open(self.tasks_path, "a")
taskfile.write("\n" + new)
taskfile.close()
self.log("[INFO] appended successfully")
self.log("[FUNCTION] ends -> add(new)")
class App(Functions):
def console_loop(self):
try:
self.check_tasky_folders()
self.log(f">> >> >> >> >> >> >> >> >> >> >> >> >> >>")
self.log(f"[PROGRAM STARTED]")
self.check_tasky_log()
self.check_tasks_txt()
self.log("[INFO] imported datetime and os modules")
months, month_names = self.make_dicts()
self.log("[INFO] printing pending tasks details...")
self.info_bar("type 'help' to view valid commands", months)
n = 0
while True:
cookie, ckdir, cookie_count = self.cookie_dir()
task_list = self.read_and_sort_tasks_file()
total_tasks = len(task_list)
self.log(f"[INFO] current total number of tasks: {total_tasks}")
if cookie and cookie_count > 0:
print("Your cookies:" + " @" * cookie_count)
self.log("[WAITING] FOR MAIN USER INPUT")
user_inp = input("\n > ").lower().lstrip()
self.log(f"[INFO] user input: {user_inp}")
words = user_inp.split()
if user_inp != "":
self.log("[INFO] user input empty = False")
if user_inp.startswith(("quit", "bye")):
self.log("[INFO] user chose to exit program")
sysend()
elif user_inp.startswith("debug"):
self.log("[DEBUG] opening logs folder for debugging")
self.info_bar("opening logs folder for debugging", months)
startfile(self.taskylog_path)
elif user_inp.startswith("help"):
self.log("[INFO] user chose help, displaying available commands")
self.info_bar("displaying available commands", months)
print(
"add / new / create".rjust(35)
+ " : Add a new Task\n"
+ "remove N / delete N / del N / rem N : Remove task number 'N'\n"
+ "(press enter key) / status / ref".rjust(35)
+ " : Refresh the remaining tasks list\n"
+ "edit N / change N / ed N".rjust(35)
+ " : Modify task number 'N' details"
)
print("quit / q / bye".rjust(35) + " : Exit Tasky")
if cookie:
if cookie_count > 0:
print("\n type 'eat cookie' to eat your cookie")
elif cookie_count == 0:
print(
"\n you're out of cookies :(\n(type 'cookie' to hopefully get a cookie)"
)
elif user_inp.startswith(("add", "new", "create")):
self.log("[INFO] user requested to add a new task")
while True:
self.log("[WAITING] for confirmation")
confirm = input("\nConfirm new task? ").lower()
self.log(f"[INFO] confirmation input: {confirm}")
if confirm != "" and confirm[0] == "y":
self.log("[INFO] confirmed")
self.info_bar(
"type 'cancel' to stop task addition", months
)
self.new_task(months, month_names)
n = 0
self.log("[INFO] output screen refreshed with tasks")
break
elif confirm != "" and confirm[0] == "n":
self.log("[INFO] cancelled")
self.info_bar("new task cancelled", months)
self.log("[INFO] output screen refreshed with tasks")
break
else:
self.log(
f"[ERROR] oonga boonga man wrote '{confirm}' instead of yes/no"
)
self.info_bar("please enter yes/no", months)
elif (
("remove" == words[0])
or ("delete" == words[0])
or ("del" == words[0])
or ("rem" == words[0])
):
if len(words) == 2 and words[1].isdecimal():
self.log(
f"[INFO] user requested to remove task number {words[1]}"
)
if int(words[1]) in range(1, total_tasks + 1):
self.log(f"[INFO] task number confirmed valid")
while True:
self.log("[WAITING] for confirmation")
confirm = input(
f"\nConfirm removal of task {words[1]}? "
).lower()
self.log(f"[INFO] confirmation input: {confirm}")
if confirm != "" and confirm[0] == "y":
self.log("[INFO] confirmed")
self.remove(words[1], months)
n = 0
self.log(
"[INFO] refreshed output screen with new tasks"
)
break
elif confirm != "" and confirm[0] == "n":
self.log("[INFO] cancelled")
self.info_bar("task removal cancelled", months)
self.log(
"[INFO] refreshed output screen with new tasks"
)
break
else:
self.log(
f"[ERROR] oonga boonga man wrote '{confirm}' instead of yes/no"
)
self.info_bar("please enter yes/no", months)
else:
self.log(
f"[ERROR] task {words[1]} doesn't exist, total tasks = {total_tasks}"
)
self.info_bar(
f"invalid task number ({words[1]}) to be removed",
months,
)
else:
self.log(f"[ERROR] command used incorrectly: {user_inp}")
self.info_bar("error! try again like 'remove 5'", months)
elif (
("edit" == words[0])
or ("change" == words[0])
or ("ed" == words[0])
):
if len(words) == 2 and words[1].isdecimal():
self.log(
f"[INFO] user requested to edit task number {words[1]}"
)
if int(words[1]) in range(1, total_tasks + 1):
self.log(f"[INFO] task number confirmed valid")
while True:
self.log("[WAITING] for confirmation")
confirm = input(
f"\nConfirm edit of task {words[1]}? "
).lower()
if confirm != "" and confirm[0] == "y":
self.log("[INFO] confirmed")
self.edit_task(words[1], months, month_names)
n = 0
self.log(
"[INFO] refreshed output screen with new tasks"
)
break
elif confirm != "" and confirm[0] == "n":
self.log("[INFO] cancelled")
print("Task edit cancelled")
self.info_bar("task edit cancelled", months)
self.log(
"[INFO] refreshed output screen with new tasks"
)
break
else:
self.log(
f"[ERROR] oonga boonga man wrote '{confirm}' instead of yes/no"
)
self.info_bar("please enter yes/no", months)
else:
self.log(
f"[ERROR] task {words[1]} doesn't exist, total tasks = {total_tasks}"
)
self.info_bar(
f"invalid task number ({words[1]}) to be edited",
months,
)
else:
self.log(f"[ERROR] command used incorrectly: {user_inp}")
self.info_bar("error! try again like 'edit 4'", months)
elif user_inp.startswith(("ref", "status")):
self.log(f"[INFO] user requested updated task status: {user_inp}")
self.log("[INFO] refreshing output screen with new tasks")
self.info_bar("refreshed tasks list", months)
# (not so) secret commands
elif user_inp.startswith(("hi", "hello", "hey")):
hello_list = [
"hello there :)",
"hii :D",
"hey-hey user ;)",
"hola mi amigo ^-^",
"hi again? ;)",
"hey :)",
"hehe hello ^o^",
"hello! type 'help' for other commands",
"hi, view other commands! (type help)",
"isn't that enough greeting for now?",
"dear user, please get 'help' (literally)",
f"please stop... with the hellos",
"don't-",
"don't you have anything else to do",
"jeez how many times will you do this",
"fine i'll wait till you do something",
"still waiting...",
"Tasky isn't a chatbot...",
"did i refer to myself in third person?",
"GAAAAAAAAAAHHH STOP IT! WILL YOU?",
"you doing this to annoy me?",
"computers can't get annoyed, can they?",
"what do you want >_<",
"thanks, i hate these greetings now",
"seriously what do you want?",
"you want food?",
"i have some cookies",
"i wanna eat these though",
"but you won't stop... hmmph",
"aaaah you want a cookie?",
"ugh, fine. you earned a cookie i guess",
"here take this cookie. keep it with you",
"I GAVE YOU A COOKIE, PLEASE STOP",
"dont look at my other cookies",
"no more cookies for you >:(",
"enough with these greetings >_<",
"i wont respond to these keywords now",
"nope. not doing it.",
"ill be here if you need me >:(",
"not responding to greetings for real now",
"go complete your tasks user :/",
"BYE",
"type 'help' ._.",
]
self.log(f"[INFO] user greeted Tasky")
if not cookie:
if n == 31:
cookie = True
ckdir.mkdir(parents=True,exist_ok=True)
cookie_count += 1
cookiefile = open(self.cookie_folder_path / "cookies.txt", "w")
cookiefile.write(str(cookie_count))
cookiefile.close()
self.info_bar(hello_list[n], months)
else:
if n == 30:
self.info_bar(
"you did get a cookie last time from me", months
)
elif n == 31:
if cookie_count == 0:
self.info_bar("you ate it yourself...", months)
else:
self.info_bar("dont ask for another cookie", months)
elif n == 32:
self.info_bar("im hungry too >:(", months)
else:
self.info_bar(hello_list[n], months)
if n != 42:
n += 1
elif (
user_inp.startswith(":)")
or user_inp.upper().startswith(":D")
or user_inp.startswith(":(")
or user_inp.startswith(":>")
or user_inp.startswith(":<")
):
self.log(f"[INFO] {user_inp[:2]}")
self.info_bar(f"{user_inp[:2].upper()}", months)
elif (
user_inp.startswith(">:(")
or user_inp.upper().startswith(">:)")
or user_inp.startswith("._.")
or user_inp.startswith(".-.")
or user_inp.lower().startswith("o_o")
):
self.log(f"[INFO] {user_inp[:3]}")
self.info_bar(f"{user_inp[:3].upper()}", months)
elif words[0] == "cookie":
if cookie:
if 0 <= cookie_count < 15:
find = randint(1, 23)
if find == 22:
self.info_bar(
"ooh! found a cookie. ugh fine take it", months
)
cookie_count += 1
cookiefile = open(self.cookie_folder_path / "cookies.txt", "w")
cookiefile.write(str(cookie_count))
cookiefile.close()
else:
self.info_bar(
"didn't find any spare cookies, go away", months
)
elif cookie_count == 15:
self.info_bar(
"you have 15 cookies, eat them first", months
)
else:
self.info_bar(
"cookie? type 'help' for valid commands", months
)
elif words[0] == "eat" and words[1] == "cookie":
if cookie:
if cookie_count > 0:
self.info_bar("huh? what was that crunch sound", months)
cookie_count -= 1
cookiefile = open(self.cookie_folder_path / "cookies.txt", "w")
cookiefile.write(str(cookie_count))
cookiefile.close()
elif cookie_count == 0:
self.info_bar("you're out of cookies, lol so sad", months)
else:
self.info_bar("eat what again? type 'help'", months)
else:
self.log(f"[ERROR] command doesn't exist: {user_inp}")
self.info_bar("type 'help' to view valid commands", months)
else:
self.log(
f"[ERROR] i feel empty inside :( just like the user's input..."
)
self.info_bar("type 'help' to view valid commands", months)
self.log("[INFO] main loop rerunning...")
except SystemExit:
self.log(f"[EXIT] Program closed")
if __name__ == "__main__":
app = App()
app.console_loop()
|
147542
|
bl_info = {
"name": "New Object",
"author": "YourNameHere",
"version": (1, 0),
"blender": (2, 5, 5),
"api": 33333,
"location": "View3D > Add > Mesh > New Object",
"description": "Adds a new Mesh Object",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Add Mesh"}
import bpy
from bpy.props import FloatVectorProperty
from add_utils import AddObjectHelper, add_object_data
from mathutils import Vector
def add_object(self, context):
scale_x = self.scale.x
scale_y = self.scale.y
verts = [Vector((-1 * scale_x, 1 * scale_y, 0)),
Vector((1 * scale_x, 1 * scale_y, 0)),
Vector((1 * scale_x, -1 * scale_y, 0)),
Vector((-1 * scale_x, -1 * scale_y, 0)),
]
edges = []
faces = [[0, 1, 2, 3]]
mesh = bpy.data.meshes.new(name='New Object Mesh')
mesh.from_pydata(verts, edges, faces)
# useful for development when the mesh may be invalid.
# mesh.validate(verbose=True)
add_object_data(context, mesh, operator=self)
class OBJECT_OT_add_object(bpy.types.Operator, AddObjectHelper):
"""Add a Mesh Object"""
bl_idname = "mesh.add_object"
bl_label = "Add Mesh Object"
bl_description = "Create a new Mesh Object"
bl_options = {'REGISTER', 'UNDO'}
scale = FloatVectorProperty(
name='scale',
default=(1.0, 1.0, 1.0),
subtype='TRANSLATION',
description='scaling',
)
def execute(self, context):
add_object(self, context)
return {'FINISHED'}
# Registration
def add_object_button(self, context):
self.layout.operator(
OBJECT_OT_add_object.bl_idname,
text="Add Object",
icon="PLUGIN")
def register():
bpy.utils.register_class(OBJECT_OT_add_object)
bpy.types.INFO_MT_mesh_add.append(add_object_button)
def unregister():
bpy.utils.unregister_class(OBJECT_OT_add_object)
bpy.types.INFO_MT_mesh_add.remove(add_object_button)
if __name__ == '__main__':
register()
|
147549
|
from typing import Dict, List
from sqlalchemy.orm import Session
from src.db import models, schemas
def select_prediction_log_all(db: Session) -> List[schemas.PredictionLog]:
return db.query(models.PredictionLog).all()
def select_prediction_log_between(
db: Session,
time_before: str,
time_later: str,
) -> List[schemas.PredictionLog]:
return (
db.query(models.PredictionLog)
.filter(models.PredictionLog.created_datetime >= time_before)
.filter(models.PredictionLog.created_datetime <= time_later)
.all()
)
def select_outlier_log_all(db: Session) -> List[schemas.OutlierLog]:
return db.query(models.OutlierLog).all()
def select_outlier_log_between(
db: Session,
time_before: str,
time_later: str,
) -> List[schemas.OutlierLog]:
return (
db.query(models.OutlierLog)
.filter(models.OutlierLog.created_datetime >= time_before)
.filter(models.OutlierLog.created_datetime <= time_later)
.all()
)
def add_prediction_log(
db: Session,
log_id: str,
log: Dict,
commit: bool = True,
) -> schemas.PredictionLog:
data = models.PredictionLog(
log_id=log_id,
log=log,
)
db.add(data)
if commit:
db.commit()
db.refresh(data)
return data
def add_outlier_log(
db: Session,
log_id: str,
log: Dict,
commit: bool = True,
) -> schemas.OutlierLog:
data = models.OutlierLog(
log_id=log_id,
log=log,
)
db.add(data)
if commit:
db.commit()
db.refresh(data)
return data
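# --- Hedged usage sketch (not part of the original module) ---
# Assumes some session factory exists elsewhere in src.db; the import path, log id and
# payload below are purely illustrative.
#
#     from src.db.database import SessionLocal  # hypothetical import path
#
#     db = SessionLocal()
#     try:
#         add_prediction_log(db, log_id="example-0001", log={"prediction": 0.87}, commit=True)
#         recent = select_prediction_log_between(db, "2021-01-01 00:00:00", "2021-01-02 00:00:00")
#     finally:
#         db.close()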
|
147552
|
from raptiformica.distributed.exec import try_machine_command
def try_issue_shutdown(host_and_port_pairs):
"""
    Iterate over the host and port pairs and try to issue a shutdown on each node until
    one of them succeeds (returns a zero exit code). At that point return the standard out output.
    If we run out of host and port pairs to try, log a warning and return None.
:param list[tuple, ..] host_and_port_pairs: A list of tuples containing host and ports
of remote hosts
:return str standard_out | None: 'consul exec' output or None
"""
issue_global_shutdown_command = ["consul", "exec", "'shutdown -h now'"]
attempt_message = "Trying to issue a global shutdown on {}:{}"
all_failed_message = "Failed to issue a global shutdown on any of the nodes."
output, _, _ = try_machine_command(
host_and_port_pairs,
issue_global_shutdown_command,
attempt_message=attempt_message,
all_failed_message=all_failed_message
)
return output
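# --- Hedged usage sketch (not part of the original module) ---
# host_and_port_pairs is a list of (host, port) tuples, matching the docstring above;
# the addresses are illustrative.
#
#     pairs = [("192.0.2.10", 22), ("192.0.2.11", 2222)]
#     output = try_issue_shutdown(pairs)
#     if output is None:
#         print("could not issue a global shutdown on any node")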
|
147607
|
import copy as cp
from qubiter.adv_applications.MeanHamil import *
from qubiter.device_specific.Qubiter_to_RigettiPyQuil import *
from qubiter.device_specific.RigettiTools import *
import qubiter.utilities_gen as utg
from openfermion.ops import QubitOperator
from pyquil.quil import Program, Pragma
from pyquil.gates import *
from pyquil.api import WavefunctionSimulator
class MeanHamil_rigetti(MeanHamil):
"""
This class is a child of MeanHamil.
This class uses either Rigetti's real hardware or virtual simulator to
calculate mean values. `qc` returned by Rigetti's get_qc() method is
passed in as an input to the constructor of this class. If num_samples
!=0, the class uses qc.run() to calculate mean values. If num_samples=0,
the class ignores the `qc` input and uses PyQuil's WavefunctionSimulator
to calculate mean values exactly.
Attributes
----------
do_resets : bool
pg : Program
object of PyQuil class `Program`
qc : QuantumComputer
returned by PyQuil method get_qc()
term_to_exec : dict[]
maps a term to an executable. QubitOperator from OpenFermion has
attribute `terms` which is a dict from a term to a coefficient. An
executable is the output of PyQuil's compile() method.
translation_line_list : list[str]
a list of lines of PyQuil code generated by the translator. The
lines all start with "pg +="
translator : Qubiter_to_RigettiPyQuil
"""
def __init__(self, qc, file_prefix, num_qbits, hamil,
all_var_nums, fun_name_to_fun,
do_resets=True, **kwargs):
"""
Constructor
        Do as much hamiltonian-independent work as possible in the constructor
        so it doesn't have to be redone with every call to the cost function.
        Also, when self.num_samples != 0, we store a dict called term_to_exec
        mapping each term of the hamiltonian hamil to an executable (the output
        of Rigetti's compile() function). When num_samples=0, term_to_exec={}.
Parameters
----------
qc : QuantumComputer
file_prefix : str
num_qbits : int
hamil : QubitOperator
all_var_nums : list[int]
fun_name_to_fun : dict[str, function]
do_resets : bool
kwargs : dict
key-words args of MeanHamilMinimizer constructor
Returns
-------
"""
MeanHamil.__init__(self, file_prefix, num_qbits, hamil,
all_var_nums, fun_name_to_fun, **kwargs)
self.qc = qc
self.do_resets = do_resets
# this creates a file with all PyQuil gates that
# are independent of hamil. Gates may contain free parameters
self.translator = Qubiter_to_RigettiPyQuil(
self.file_prefix, self.num_qbits,
aqasm_name='RigPyQuil', prelude_str='', ending_str='')
with open(utg.preface(self.translator.aqasm_path), 'r') as fi:
self.translation_line_list = fi.readlines()
pg = Program()
self.pg = pg
if self.num_samples:
# pg prelude
pg += Pragma('INITIAL_REWIRING', ['"PARTIAL"'])
if self.do_resets:
pg += RESET()
ro = pg.declare('ro', 'BIT', self.num_qbits)
s = ''
for var_num in self.all_var_nums:
vname = self.translator.vprefix + str(var_num)
s += vname
s += ' = pg.declare("'
s += vname
s += '", memory_type="REAL")\n'
exec(s)
# add to pg the operations that are independent of hamil
for line in self.translation_line_list:
line = line.strip('\n')
if line:
exec(line)
len_pg_in = len(pg)
# hamil loop to store executables for each term in hamil
self.term_to_exec = {}
for term, coef in self.hamil.terms.items():
# reset pg to initial length.
# Temporary work-around to bug
# in PyQuil ver 2.5.0.
# Slicing was changing
# pg from type Program to type list
pg = Program(pg[:len_pg_in])
self.pg = pg
# add xy measurements coda to pg
bit_pos_to_xy_str =\
{bit: action for bit, action in term if action != 'Z'}
RigettiTools.add_xy_meas_coda_to_program(
pg, bit_pos_to_xy_str)
# request measurements
for i in range(self.num_qbits):
pg += MEASURE(i, ro[i])
pg.wrap_in_numshots_loop(shots=self.num_samples)
executable = self.qc.compile(pg)
# print(",,,...", executable)
self.term_to_exec[term] = executable
def get_mean_val(self, var_num_to_rads):
"""
This method returns the empirically determined Hamiltonian mean
value. It takes as input the values of placeholder variables. It
passes those values into the Rigetti method run() when num_samples
!=0. When num_samples=0, WavefunctionSimulator is used to calculate
the output mean value exactly.
Parameters
----------
var_num_to_rads : dict[int, float]
Returns
-------
float
"""
# hamil loop
mean_val = 0
for term, coef in self.hamil.terms.items():
# we have checked before that coef is real
coef = complex(coef).real
vprefix = self.translator.vprefix
var_name_to_rads = {vprefix + str(vnum): [rads]
for vnum, rads in var_num_to_rads.items()}
if self.num_samples:
# send and receive from cloud, get obs_vec
bitstrings = self.qc.run(self.term_to_exec[term],
memory_map=var_name_to_rads)
obs_vec = RigettiTools.obs_vec_from_bitstrings(
bitstrings, self.num_qbits, bs_is_array=True)
# go from obs_vec to effective state vec
counts_dict = StateVec.get_counts_from_obs_vec(self.num_qbits,
obs_vec)
emp_pd = StateVec.get_empirical_pd_from_counts(self.num_qbits,
counts_dict)
effective_st_vec = StateVec.get_emp_state_vec_from_emp_pd(
self.num_qbits, emp_pd)
else: # num_samples = 0
sim = WavefunctionSimulator()
pg = Program()
# don't know how to declare number of qubits
# so do this
for k in range(self.num_qbits):
pg += I(k)
for key, val in var_name_to_rads.items():
exec(key + '=' + str(val[0]))
for line in self.translation_line_list:
line = line.strip('\n')
if line:
exec(line)
bit_pos_to_xy_str =\
{bit: action for bit, action in term if action != 'Z'}
RigettiTools.add_xy_meas_coda_to_program(
pg, bit_pos_to_xy_str)
st_vec_arr = sim.wavefunction(pg).amplitudes
st_vec_arr = st_vec_arr.reshape([2]*self.num_qbits)
perm = list(reversed(range(self.num_qbits)))
st_vec_arr = np.transpose(st_vec_arr, perm)
effective_st_vec = StateVec(self.num_qbits, st_vec_arr)
# add contribution to mean
real_arr = self.get_real_vec(term)
mean_val += coef*effective_st_vec.\
get_mean_value_of_real_diag_mat(real_arr)
return mean_val
if __name__ == "__main__":
def main():
pass
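        # Hedged construction sketch (not part of the original file); all names and
        # numbers below are illustrative, and `get_qc` is assumed to come from pyquil.api:
        #     qc = get_qc('4q-qvm')
        #     hamil = QubitOperator('X0 Z1', 0.7) + QubitOperator('Y2', 0.3)
        #     mhr = MeanHamil_rigetti(qc, 'io_folder/vqe_circuit', 4, hamil,
        #                             all_var_nums=[1, 2], fun_name_to_fun=None,
        #                             num_samples=0)
        #     print(mhr.get_mean_val({1: 0.1, 2: 0.2}))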
|
147636
|
r"""undocumented
The code in this module is largely based on (copy-pasted from)
https://github.com/huggingface/pytorch-pretrained-BERT; if you find it useful, please cite them as well.
"""
__all__ = [
"BertModel",
]
import copy
import json
import math
import os
import torch
from torch import nn
import numpy as np
from ...io.file_utils import _get_file_name_base_on_postfix
from ...io.file_utils import _get_bert_dir
from ...core import logger
CONFIG_FILE = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
BERT_KEY_RENAME_MAP_1 = {
'gamma': 'weight',
'beta': 'bias',
'distilbert.embeddings': 'bert.embeddings',
'distilbert.transformer': 'bert.encoder',
}
BERT_KEY_RENAME_MAP_2 = {
'q_lin': 'self.query',
'k_lin': 'self.key',
'v_lin': 'self.value',
'out_lin': 'output.dense',
'sa_layer_norm': 'attention.output.LayerNorm',
'ffn.lin1': 'intermediate.dense',
'ffn.lin2': 'output.dense',
'output_layer_norm': 'output.LayerNorm',
}
class BertConfig(object):
r"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
architectures='bert'):
r"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.architectures = architectures
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
r"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
r"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
r"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
r"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
r""" Save this instance to a json file."""
if os.path.isdir(json_file_path):
json_file_path = os.path.join(json_file_path, CONFIG_FILE)
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
def save_pretrained(self, save_directory):
self.to_json_file(save_directory)
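# Hedged usage sketch (not part of the original source): a BertConfig can be built either
# from an integer vocabulary size plus keyword arguments or from a JSON config file, and
# serialized back via to_dict()/to_json_string():
#
#     config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768)
#     print(config.to_json_string())
#     same_config = BertConfig.from_dict(config.to_dict())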
def gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
BertLayerNorm = torch.nn.LayerNorm
class DistilBertEmbeddings(nn.Module):
def __init__(self, config):
super(DistilBertEmbeddings, self).__init__()
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
if config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(n_pos=config.max_position_embeddings,
dim=config.hidden_size,
out=self.position_embeddings.weight)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids, position_ids=None):
r"""
Parameters
----------
input_ids: torch.tensor(bs, max_seq_length)
The token ids to embed.
        token_type_ids: not used.
        position_ids: not used.
Outputs
-------
embeddings: torch.tensor(bs, max_seq_length, dim)
The embedded tokens (plus position embeddings, no token_type embeddings)
"""
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim)
embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
return embeddings
class BertEmbeddings(nn.Module):
r"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None, words_embeddings=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if words_embeddings is None:
words_embeddings = self.word_embeddings(input_ids)
else:
assert input_ids.size() == words_embeddings.size()[: -1]
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config, num_output_layer=-1):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
num_output_layer = num_output_layer if num_output_layer >= 0 else (len(self.layer) + num_output_layer)
self.num_output_layer = max(min(num_output_layer, len(self.layer)), 0)
if self.num_output_layer + 1 < len(self.layer):
            logger.info(f'The transformer encoder will exit early after layer {self.num_output_layer} '
                        f'(layer 0 means the embedding layer)!')
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for idx, layer_module in enumerate(self.layer):
if idx >= self.num_output_layer:
break
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(nn.Module):
r"""
    BERT (Bidirectional Encoder Representations from Transformers).
    Build a BERT model from pretrained weights::
        model = BertModel.from_pretrained(model_dir_or_name)
    Build a BERT model with randomly initialized weights::
        model = BertModel()
    :param int vocab_size: vocabulary size; defaults to 30522, the vocabulary size of the English uncased BERT
    :param int hidden_size: hidden size; defaults to 768 (BERT base)
    :param int num_hidden_layers: number of hidden layers; defaults to 12 (BERT base)
    :param int num_attention_heads: number of attention heads; defaults to 12 (BERT base)
    :param int intermediate_size: hidden size of the FFN layer; defaults to 3072 (BERT base)
    :param str hidden_act: activation function of the FFN layer; defaults to ``gelu``
    :param float hidden_dropout_prob: dropout of the FFN layer; defaults to 0.1
    :param float attention_probs_dropout_prob: dropout of the attention layers; defaults to 0.1
    :param int max_position_embeddings: maximum sequence length; defaults to 512
    :param int type_vocab_size: maximum number of segments; defaults to 2
    :param int initializer_range: range used to initialize the weights; defaults to 0.02
"""
def __init__(self, config, *inputs, **kwargs):
super(BertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
self.hidden_size = self.config.hidden_size
self.model_type = 'bert'
neg_num_output_layer = kwargs.get('neg_num_output_layer', -1)
pos_num_output_layer = kwargs.get('pos_num_output_layer', self.config.num_hidden_layers)
self.num_output_layer = max(neg_num_output_layer + 1 + self.config.num_hidden_layers, pos_num_output_layer)
if hasattr(config, 'sinusoidal_pos_embds'):
self.model_type = 'distilbert'
elif 'model_type' in kwargs:
self.model_type = kwargs['model_type'].lower()
if self.model_type == 'distilbert':
self.embeddings = DistilBertEmbeddings(config)
else:
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config, num_output_layer=self.num_output_layer)
if self.model_type != 'distilbert':
self.pooler = BertPooler(config)
else:
            logger.info('DistilBert has no pooler; the hidden state of the [CLS] token will be used as the pooled output.')
self.apply(self.init_bert_weights)
@property
def dtype(self):
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def init_bert_weights(self, module):
r""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
position_ids=None):
"""
        :param torch.LongTensor input_ids: bsz x max_len input token ids
        :param torch.LongTensor token_type_ids: bsz x max_len; if not given, assumed to be all zeros. Usually 0 up to
            and including the first sep token, and 1 after it.
        :param attention_mask: 1 for positions that should be attended to, 0 otherwise
        :param bool output_all_encoded_layers: whether to output all layers. If True, the token embeddings (bpe,
            position and type embeddings) and the hidden states of every layer are returned; if False, only the
            last layer is returned.
        :param torch.LongTensor position_ids: bsz x max_len position ids
        :return: encode_layers: if output_all_encoded_layers is True, a list of num_layers+1 elements, each of shape
            bsz x max_len x hidden_size; otherwise a single tensor of shape bsz x max_len x hidden_size;
            pooled_output: bsz x hidden_size representation of the cls token, usable for sentence classification
"""
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
        # this will cause an issue with DataParallel: https://github.com/pytorch/pytorch/issues/40457#issuecomment-648396469
# extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = extended_attention_mask.to(self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
encoded_layers.insert(0, embedding_output)
sequence_output = encoded_layers[-1]
if self.model_type != 'distilbert':
pooled_output = self.pooler(sequence_output)
else:
pooled_output = sequence_output[:, 0]
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
@classmethod
def from_pretrained(cls, model_dir_or_name, *inputs, **kwargs):
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
kwargs.pop('cache_dir', None)
kwargs.pop('from_tf', None)
# get model dir from name or dir
pretrained_model_dir = _get_bert_dir(model_dir_or_name)
# Load config
config_file = _get_file_name_base_on_postfix(pretrained_model_dir, '.json')
config = BertConfig.from_json_file(config_file)
if state_dict is None:
weights_path = _get_file_name_base_on_postfix(pretrained_model_dir, '.bin')
state_dict = torch.load(weights_path, map_location='cpu')
else:
logger.error(f'Cannot load parameters through `state_dict` variable.')
raise RuntimeError(f'Cannot load parameters through `state_dict` variable.')
model_type = 'BERT'
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'bert' not in key:
new_key = 'bert.' + key
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
for key_name in BERT_KEY_RENAME_MAP_1:
if key_name in key:
new_key = key.replace(key_name, BERT_KEY_RENAME_MAP_1[key_name])
if 'distilbert' in key:
model_type = 'DistilBert'
break
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
for key_name in BERT_KEY_RENAME_MAP_2:
if key_name in key:
new_key = key.replace(key_name, BERT_KEY_RENAME_MAP_2[key_name])
break
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# Instantiate model.
model = cls(config, model_type=model_type, *inputs, **kwargs)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.warning("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.debug("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
logger.info(f"Load pre-trained {model_type} parameters from file {weights_path}.")
return model
def save_pretrained(self, save_directory):
""" 保存模型到某个folder
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.debug("Model weights saved in {}".format(output_model_file))
|
147700
|
from random import randint
import json
import time
import requests
import vk
# Settings
with open('keys.json', 'r') as file:
VK_API_ACCESS_TOKEN = json.loads(file.read())['vk']['token']
with open('sets.json', 'r') as file:
sets = json.loads(file.read())
GROUP_ID = sets['vk']['group']
SERVER_LINK = sets['server']['link']
CLIENT_LINK = sets['client']['link']
VK_API_VERSION = '5.95'
session = vk.Session(access_token=VK_API_ACCESS_TOKEN)
api = vk.API(session, v=VK_API_VERSION)
# First LongPoll request: get the server and key
longPoll = api.groups.getLongPollServer(group_id=GROUP_ID)
server, key, ts = longPoll['server'], longPoll['key'], longPoll['ts']
while True:
    # Subsequent requests: only ts changes
try:
longPoll = requests.post(server, data={
'act': 'a_check',
'key': key,
'ts': ts,
'wait': 25,
}).json()
except:
print('Error 1')
time.sleep(1)
continue
if 'updates' in longPoll and len(longPoll['updates']):
for update in longPoll['updates']:
print(update['object'])
            # Receiving messages
# if update['object']['from_id'] < 0: # and update['object']['peer_id'] != update['object']['from_id']:
# continue
if update['type'] == 'message_new':
api.messages.send(
peer_id=update['object']['peer_id'],
random_id=randint(-2147483648, 2147483647),
message='идентификатор #{}/{}'.format(update['object']['from_id'], update['object']['peer_id']),
)
    # Update ts for the next request
try:
ts = longPoll['ts']
except:
print('Error 2')
period = 0
while True:
period += 1
try:
session = vk.Session(access_token=VK_API_ACCESS_TOKEN)
api = vk.API(session, v=VK_API_VERSION)
            # First LongPoll request: get the server and key
longPoll = api.groups.getLongPollServer(group_id=GROUP_ID)
server, key, ts = longPoll['server'], longPoll['key'], longPoll['ts']
except:
print('Error 3')
time.sleep(period)
else:
break
|
147715
|
import os
import sys
from setuptools import setup, find_packages
readme_file = os.path.abspath(os.path.join(os.path.dirname(__file__),
'README.rst'))
try:
long_description = open(readme_file).read()
except IOError as err:
sys.stderr.write("[ERROR] Cannot find file specified as "
"long_description (%s)\n" % readme_file)
sys.exit(1)
extra_kwargs = {'tests_require': ['mock>1.0']}
if sys.version_info < (2, 7):
extra_kwargs['tests_require'].append('unittest2')
frogress = __import__('frogress')
setup(
name='frogress',
version=frogress.get_version(),
url='https://github.com/lukaszb/frogress',
author='<NAME>',
author_email='<EMAIL>',
description="A progress tool for humans",
long_description=long_description,
zip_safe=False,
packages=find_packages(),
license='MIT',
scripts=[],
test_suite='frogress.tests.collector',
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
**extra_kwargs
)
|
147733
|
import ctypes
import sys
import traceback
from ._k4arecordTypes import *
from ..k4a._k4atypes import *
record_dll = None
def setup_library(module_k4arecord_path):
global record_dll
try:
record_dll = ctypes.CDLL(module_k4arecord_path)
except Exception as e:
print("Failed to load library", e)
sys.exit(1)
def k4a_record_create(file_path, device, device_config, recording_handle):
"""
K4ARECORD_EXPORT k4a_result_t k4a_record_create(const char *path,
k4a_device_t device,
const k4a_device_configuration_t device_config,
k4a_record_t *recording_handle);
"""
_k4a_record_create = record_dll.k4a_record_create
_k4a_record_create.restype = k4a_result_t
_k4a_record_create.argtypes = (ctypes.POINTER(ctypes.c_char), \
k4a_device_t, \
k4a_device_configuration_t, \
ctypes.POINTER(k4a_record_t),\
)
return _k4a_record_create(file_path, device, device_config, recording_handle)
def k4a_record_write_header(recording_handle):
# K4ARECORD_EXPORT k4a_result_t k4a_record_write_header(k4a_record_t recording_handle);
_k4a_record_write_header = record_dll.k4a_record_write_header
_k4a_record_write_header.restype = k4a_result_t
_k4a_record_write_header.argtypes = (k4a_record_t,)
return _k4a_record_write_header(recording_handle)
def k4a_record_write_capture(recording_handle, capture_handle):
# K4ARECORD_EXPORT k4a_result_t k4a_record_write_capture(k4a_record_t recording_handle, k4a_capture_t capture_handle);
_k4a_record_write_capture = record_dll.k4a_record_write_capture
_k4a_record_write_capture.restype = k4a_result_t
_k4a_record_write_capture.argtypes = (k4a_record_t, \
k4a_capture_t)
return _k4a_record_write_capture(recording_handle, capture_handle)
def k4a_record_flush(recording_handle):
# K4ARECORD_EXPORT k4a_result_t k4a_record_flush(k4a_record_t recording_handle);
_k4a_record_flush = record_dll.k4a_record_flush
_k4a_record_flush.restype = k4a_result_t
_k4a_record_flush.argtypes = (k4a_record_t,)
return _k4a_record_flush(recording_handle)
def k4a_record_close(recording_handle):
# K4ARECORD_EXPORT void k4a_record_close(k4a_record_t recording_handle);
_k4a_record_close = record_dll.k4a_record_close
_k4a_record_close.restype = None
_k4a_record_close.argtypes = (k4a_record_t,)
_k4a_record_close(recording_handle)
###########################
### Playback ###
###########################
def k4a_playback_open(file_path, playback_handle):
# K4ARECORD_EXPORT k4a_result_t k4a_playback_open(const char *path, k4a_playback_t *playback_handle);
_k4a_playback_open = record_dll.k4a_playback_open
_k4a_playback_open.restype = k4a_result_t
_k4a_playback_open.argtypes = (ctypes.POINTER(ctypes.c_char), \
ctypes.POINTER(k4a_playback_t),)
return _k4a_playback_open(file_path, playback_handle)
def k4a_playback_close(playback_handle):
# K4ARECORD_EXPORT void k4a_playback_close(k4a_playback_t playback_handle);
_k4a_playback_close = record_dll.k4a_playback_close
_k4a_playback_close.restype = None
_k4a_playback_close.argtypes = (k4a_playback_t,)
_k4a_playback_close(playback_handle)
def k4a_playback_get_raw_calibration(playback_handle, data, data_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_raw_calibration(k4a_playback_t playback_handle,
uint8_t *data,
size_t *data_size);
"""
_k4a_playback_get_raw_calibration = record_dll.k4a_playback_get_raw_calibration
_k4a_playback_get_raw_calibration.restype = k4a_buffer_result_t
_k4a_playback_get_raw_calibration.argtypes = (k4a_playback_t, \
ctypes.POINTER(ctypes.c_uint8),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_get_raw_calibration(playback_handle, data, data_size)
def k4a_playback_get_calibration(playback_handle, calibration):
"""
K4ARECORD_EXPORT k4a_result_t k4a_playback_get_calibration(k4a_playback_t playback_handle,
k4a_calibration_t *calibration);
"""
_k4a_playback_get_calibration = record_dll.k4a_playback_get_calibration
_k4a_playback_get_calibration.restype = k4a_result_t
_k4a_playback_get_calibration.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_calibration_t))
return _k4a_playback_get_calibration(playback_handle, calibration)
def k4a_playback_get_record_configuration(playback_handle, config):
"""
K4ARECORD_EXPORT k4a_result_t k4a_playback_get_record_configuration(k4a_playback_t playback_handle,
k4a_record_configuration_t *config);
"""
_k4a_playback_get_record_configuration = record_dll.k4a_playback_get_record_configuration
_k4a_playback_get_record_configuration.restype = k4a_result_t
_k4a_playback_get_record_configuration.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_record_configuration_t))
return _k4a_playback_get_record_configuration(playback_handle, config)
def k4a_playback_check_track_exists(playback_handle, track_name):
"""
K4ARECORD_EXPORT bool k4a_playback_check_track_exists(k4a_playback_t playback_handle, const char *track_name);
"""
_k4a_playback_check_track_exists = record_dll.k4a_playback_check_track_exists
_k4a_playback_check_track_exists.restype = ctypes.c_bool
_k4a_playback_check_track_exists.argtypes = (k4a_playback_t, \
ctypes.POINTER(ctypes.c_char))
return _k4a_playback_check_track_exists(playback_handle, track_name)
def k4a_playback_get_track_count(playback_handle):
"""
K4ARECORD_EXPORT size_t k4a_playback_get_track_count(k4a_playback_t playback_handle);
"""
_k4a_playback_get_track_count = record_dll.k4a_playback_get_track_count
_k4a_playback_get_track_count.restype = ctypes.c_size_t
_k4a_playback_get_track_count.argtypes = (k4a_playback_t,)
return _k4a_playback_get_track_count(playback_handle)
def k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_track_name(k4a_playback_t playback_handle,
size_t track_index,
char *track_name,
size_t *track_name_size);
"""
_k4a_playback_get_track_name = record_dll.k4a_playback_get_track_name
_k4a_playback_get_track_name.restype = k4a_buffer_result_t
_k4a_playback_get_track_name.argtypes = (k4a_playback_t,\
ctypes.c_size_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size)
def k4a_playback_track_is_builtin(playback_handle, track_name):
    """
    K4ARECORD_EXPORT bool k4a_playback_track_is_builtin(k4a_playback_t playback_handle, const char *track_name);
"""
_k4a_playback_track_is_builtin = record_dll.k4a_playback_track_is_builtin
_k4a_playback_track_is_builtin.restype = ctypes.c_bool
_k4a_playback_track_is_builtin.argtypes = (k4a_playback_t,\
ctypes.POINTER(ctypes.c_char))
return _k4a_playback_track_is_builtin(playback_handle, track_name)
def k4a_playback_track_get_video_settings(playback_handle, track_name, video_settings):
"""
K4ARECORD_EXPORT k4a_result_t k4a_playback_track_get_video_settings(k4a_playback_t playback_handle,
const char *track_name,
k4a_record_video_settings_t *video_settings);
"""
_k4a_playback_track_get_video_settings = record_dll.k4a_playback_track_get_video_settings
_k4a_playback_track_get_video_settings.restype = k4a_result_t
_k4a_playback_track_get_video_settings.argtypes = (k4a_playback_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(k4a_record_video_settings_t))
return _k4a_playback_track_get_video_settings(playback_handle, track_name, video_settings)
def k4a_playback_track_get_codec_id(playback_handle, track_name, codec_id, codec_id_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_track_get_codec_id(k4a_playback_t playback_handle,
const char *track_name,
char *codec_id,
size_t *codec_id_size);
"""
_k4a_playback_track_get_codec_id = record_dll.k4a_playback_track_get_codec_id
_k4a_playback_track_get_codec_id.restype = k4a_buffer_result_t
_k4a_playback_track_get_codec_id.argtypes = (k4a_playback_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_track_get_codec_id(playback_handle, track_name, codec_id, codec_id_size)
def k4a_playback_track_get_codec_context(playback_handle, track_name, codec_context, codec_context_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_track_get_codec_context(k4a_playback_t playback_handle,
const char *track_name,
uint8_t *codec_context,
size_t *codec_context_size);
"""
_k4a_playback_track_get_codec_context = record_dll.k4a_playback_track_get_codec_context
_k4a_playback_track_get_codec_context.restype = k4a_buffer_result_t
_k4a_playback_track_get_codec_context.argtypes = (k4a_playback_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_uint8),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_track_get_codec_context(playback_handle, track_name, codec_context, codec_context_size)
def k4a_playback_get_tag(playback_handle, name, value, value_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_tag(k4a_playback_t playback_handle,
const char *name,
char *value,
size_t *value_size);
"""
_k4a_playback_get_tag = record_dll.k4a_playback_get_tag
_k4a_playback_get_tag.restype = k4a_buffer_result_t
_k4a_playback_get_tag.argtypes = (k4a_playback_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_get_tag(playback_handle, name, value, value_size)
def k4a_playback_set_color_conversion(playback_handle, target_format):
"""
K4ARECORD_EXPORT k4a_result_t k4a_playback_set_color_conversion(k4a_playback_t playback_handle,
k4a_image_format_t target_format);
"""
_k4a_playback_set_color_conversion = record_dll.k4a_playback_set_color_conversion
_k4a_playback_set_color_conversion.restype = k4a_result_t
_k4a_playback_set_color_conversion.argtypes = (k4a_playback_t,\
k4a_image_format_t)
return _k4a_playback_set_color_conversion(playback_handle, target_format)
def k4a_playback_get_attachment(playback_handle, file_name, data, data_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_attachment(k4a_playback_t playback_handle,
const char *file_name,
uint8_t *data,
size_t *data_size);
"""
_k4a_playback_get_attachment = record_dll.k4a_playback_get_attachment
_k4a_playback_get_attachment.restype = k4a_buffer_result_t
_k4a_playback_get_attachment.argtypes = (k4a_playback_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_uint8),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_get_attachment(playback_handle, file_name, data, data_size)
def k4a_playback_get_next_capture(playback_handle, capture_handle):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_next_capture(k4a_playback_t playback_handle,
k4a_capture_t *capture_handle);
"""
_k4a_playback_get_next_capture = record_dll.k4a_playback_get_next_capture
_k4a_playback_get_next_capture.restype = k4a_stream_result_t
_k4a_playback_get_next_capture.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_capture_t),)
return _k4a_playback_get_next_capture(playback_handle, capture_handle)
def k4a_playback_get_previous_capture(playback_handle, capture_handle):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_previous_capture(k4a_playback_t playback_handle,
k4a_capture_t *capture_handle);
"""
_k4a_playback_get_previous_capture = record_dll.k4a_playback_get_previous_capture
_k4a_playback_get_previous_capture.restype = k4a_stream_result_t
_k4a_playback_get_previous_capture.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_capture_t),)
return _k4a_playback_get_previous_capture(playback_handle, capture_handle)
def k4a_playback_get_next_imu_sample(playback_handle, imu_sample):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_next_imu_sample(k4a_playback_t playback_handle,
k4a_imu_sample_t *imu_sample);
"""
_k4a_playback_get_next_imu_sample = record_dll.k4a_playback_get_next_imu_sample
_k4a_playback_get_next_imu_sample.restype = k4a_stream_result_t
_k4a_playback_get_next_imu_sample.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_imu_sample_t),)
return _k4a_playback_get_next_imu_sample(playback_handle, imu_sample)
def k4a_playback_get_previous_imu_sample(playback_handle, imu_sample):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_previous_imu_sample(k4a_playback_t playback_handle,
k4a_imu_sample_t *imu_sample);
"""
_k4a_playback_get_previous_imu_sample = record_dll.k4a_playback_get_previous_imu_sample
_k4a_playback_get_previous_imu_sample.restype = k4a_stream_result_t
_k4a_playback_get_previous_imu_sample.argtypes = (k4a_playback_t, \
ctypes.POINTER(k4a_imu_sample_t),)
return _k4a_playback_get_previous_imu_sample(playback_handle, imu_sample)
def k4a_playback_get_next_data_block(playback_handle, track_name, data_block_handle):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_next_data_block(k4a_playback_t playback_handle,
const char *track_name,
k4a_playback_data_block_t *data_block_handle);
"""
_k4a_playback_get_next_data_block = record_dll.k4a_playback_get_next_data_block
_k4a_playback_get_next_data_block.restype = k4a_stream_result_t
_k4a_playback_get_next_data_block.argtypes = (k4a_playback_t, \
ctypes.POINTER(ctypes.c_char),
ctypes.POINTER(k4a_playback_data_block_t),)
return _k4a_playback_get_next_data_block(playback_handle, track_name, data_block_handle)
def k4a_playback_get_previous_data_block(playback_handle, track_name, data_block_handle):
"""
K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_previous_data_block(k4a_playback_t playback_handle,
const char *track_name,
k4a_playback_data_block_t *data_block_handle);
"""
_k4a_playback_get_previous_data_block = record_dll.k4a_playback_get_previous_data_block
_k4a_playback_get_previous_data_block.restype = k4a_stream_result_t
_k4a_playback_get_previous_data_block.argtypes = (k4a_playback_t, \
ctypes.POINTER(ctypes.c_char),
ctypes.POINTER(k4a_playback_data_block_t),)
return _k4a_playback_get_previous_data_block(playback_handle, track_name, data_block_handle)
def k4a_playback_data_block_get_device_timestamp_usec(data_block_handle):
"""
K4ARECORD_EXPORT uint64_t k4a_playback_data_block_get_device_timestamp_usec(k4a_playback_data_block_t data_block_handle);
"""
_k4a_playback_data_block_get_device_timestamp_usec = record_dll.k4a_playback_data_block_get_device_timestamp_usec
_k4a_playback_data_block_get_device_timestamp_usec.restype = ctypes.c_uint64
_k4a_playback_data_block_get_device_timestamp_usec.argtypes = (k4a_playback_data_block_t,)
return _k4a_playback_data_block_get_device_timestamp_usec(data_block_handle)
def k4a_playback_data_block_get_buffer_size(data_block_handle):
"""
K4ARECORD_EXPORT size_t k4a_playback_data_block_get_buffer_size(k4a_playback_data_block_t data_block_handle);
"""
_k4a_playback_data_block_get_buffer_size = record_dll.k4a_playback_data_block_get_buffer_size
_k4a_playback_data_block_get_buffer_size.restype = ctypes.c_size_t
_k4a_playback_data_block_get_buffer_size.argtypes = (k4a_playback_data_block_t,)
return _k4a_playback_data_block_get_buffer_size(data_block_handle)
def k4a_playback_data_block_get_buffer(data_block_handle):
"""
K4ARECORD_EXPORT uint8_t *k4a_playback_data_block_get_buffer(k4a_playback_data_block_t data_block_handle);
"""
_k4a_playback_data_block_get_buffer = record_dll.k4a_playback_data_block_get_buffer
_k4a_playback_data_block_get_buffer.restype = ctypes.POINTER(ctypes.c_uint8)
_k4a_playback_data_block_get_buffer.argtypes = (k4a_playback_data_block_t,)
return _k4a_playback_data_block_get_buffer(data_block_handle)
def k4a_playback_data_block_release(data_block_handle):
"""
K4ARECORD_EXPORT void k4a_playback_data_block_release(k4a_playback_data_block_t data_block_handle);
"""
_k4a_playback_data_block_release = record_dll.k4a_playback_data_block_release
_k4a_playback_data_block_release.restype = None
_k4a_playback_data_block_release.argtypes = (k4a_playback_data_block_t,)
return _k4a_playback_data_block_release(data_block_handle)
def k4a_playback_seek_timestamp(playback_handle, offset_usec, origin):
"""
K4ARECORD_EXPORT k4a_result_t k4a_playback_seek_timestamp(k4a_playback_t playback_handle,
int64_t offset_usec,
k4a_playback_seek_origin_t origin);
"""
_k4a_playback_seek_timestamp = record_dll.k4a_playback_seek_timestamp
_k4a_playback_seek_timestamp.restype = k4a_result_t
_k4a_playback_seek_timestamp.argtypes = (k4a_playback_t,\
ctypes.c_int64,\
k4a_playback_seek_origin_t)
return _k4a_playback_seek_timestamp(playback_handle, offset_usec, origin)
def k4a_playback_get_recording_length_usec(playback_handle):
"""
K4ARECORD_EXPORT uint64_t k4a_playback_get_recording_length_usec(k4a_playback_t playback_handle);
"""
_k4a_playback_get_recording_length_usec = record_dll.k4a_playback_get_recording_length_usec
_k4a_playback_get_recording_length_usec.restype = ctypes.c_uint64
_k4a_playback_get_recording_length_usec.argtypes = (k4a_playback_t,)
return _k4a_playback_get_recording_length_usec(playback_handle)
def k4a_playback_get_last_timestamp_usec(playback_handle):
"""
K4ARECORD_DEPRECATED_EXPORT uint64_t k4a_playback_get_last_timestamp_usec(k4a_playback_t playback_handle);
"""
_k4a_playback_get_last_timestamp_usec = record_dll.k4a_playback_get_last_timestamp_usec
_k4a_playback_get_last_timestamp_usec.restype = ctypes.c_uint64
_k4a_playback_get_last_timestamp_usec.argtypes = (k4a_playback_t,)
return _k4a_playback_get_last_timestamp_usec(playback_handle)
def VERIFY(result, error):
if result != K4A_RESULT_SUCCEEDED:
print(error)
traceback.print_stack()
sys.exit(1)
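# --- Hedged usage sketch (not part of the original bindings) ---
# Assumes setup_library() was called with the path to the k4arecord shared library and
# that k4a_playback_t / K4A_RESULT_SUCCEEDED come from the imported type modules; the
# library path and file name are illustrative.
#
#     setup_library("k4arecord.dll")
#     playback = k4a_playback_t()
#     VERIFY(k4a_playback_open(b"capture.mkv", ctypes.byref(playback)),
#            "Failed to open recording")
#     print("length (usec):", k4a_playback_get_recording_length_usec(playback))
#     k4a_playback_close(playback)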
|
147770
|
from django.apps import AppConfig
from logux import settings
from logux.utils import autodiscover
class LoguxConfig(AppConfig):
""" Logux app conf """
name = 'logux'
verbose_name = 'Logux'
def ready(self):
# check if all required settings is defined
settings.get_config()
# import all logux_actions.py and logux_subscriptions.py from consumer modules
autodiscover()
|
147794
|
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from tqdm import tqdm
CWD_PATH = '/home/ubuntu/rue/object_detector/open_images_fashion'
PATH_TO_CKPT = os.path.join(CWD_PATH, 'export/frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(CWD_PATH, 'openimage_label_map.pbtxt')
IMAGE_SIZE = (12, 8)
PATH_TO_TEST_IMAGES_DIR = '/home/ubuntu/rue/object_detector/open_images_fashion/challenge2018_test'
TEST_IMAGE_PATHS = os.listdir(PATH_TO_TEST_IMAGES_DIR)
NUM_CLASSES = 493
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
class_names = pd.read_csv(os.path.join(CWD_PATH, 'class_names.csv'), header=None)
l2n_dict = {}
n2l_dict = {}
for i in range(class_names.shape[0]):
label = class_names.iloc[i,0]
name = class_names.iloc[i,1]
l2n_dict[label] = name
n2l_dict[name] = label
#end for
def detect_objects(image_np, sess, detection_graph):
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
"""
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
"""
return boxes, scores, classes
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
#Load a frozen TF model
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
TOP_K = 3
SCORE_THRESHOLD = 0.15
submission_df = pd.DataFrame(columns=['ImageId', 'PredictionString'])
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
for idx, image_path in tqdm(enumerate(TEST_IMAGE_PATHS)):
image = Image.open(PATH_TO_TEST_IMAGES_DIR + '/' + image_path)
image_np = load_image_into_numpy_array(image)
image_name = image_path.split('/')[-1].split('.')[0]
image_boxes, image_scores, image_classes = detect_objects(image_np, sess, detection_graph)
image_scores_top = image_scores.flatten()[:TOP_K]
image_classes_top = image_classes.flatten()[:TOP_K]
prediction_str = ""
for i in range(TOP_K):
if (image_scores_top[i] > SCORE_THRESHOLD):
image_object_label = category_index[image_classes_top[i]]['name']
y_min, x_min, y_max, x_max = image_boxes[0,i,:]
#print(image_object_label)
#print(n2l_dict[image_object_label])
#print(image_object_box)
prediction_str += n2l_dict[image_object_label] + " " + str(round(image_scores_top[i], 2)) + " " + str(round(x_min, 4)) + " " + str(round(y_min, 4)) + " " + str(round(x_max, 4)) + " " + str(round(y_max, 4)) + " "
#end for
print("{},{}".format(image_name, prediction_str))
submission_df.loc[idx,'ImageId'] = image_name
submission_df.loc[idx,'PredictionString'] = prediction_str
submission_df.to_csv("./ssd_76880_top3_t015_with_scores_ordered.csv", index=False)
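# Hedged note (not in the original script): each PredictionString written above is a
# space-separated sequence of "label confidence x_min y_min x_max y_max" groups, e.g.
#     /m/0combx 0.87 0.1012 0.2034 0.5567 0.8901
# The label code here is illustrative; actual codes come from class_names.csv.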
|
147797
|
from .solver import *
from .convscore_solver import *
from .convscorenegsamp_solver import *
from .convscorenegv1_solver import *
from .convscorencev1_solver import *
from .ruberu_solver import *
from .adem_solver import *
|
147815
|
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import Adam
from transformer_keras import get_or_create, save_config
from transformer_keras.custom.callbacks import SGDRScheduler, LRFinder, WatchScheduler, LRSchedulerPerStep
from transformer_keras.data_loader import DataLoader
if __name__ == '__main__':
train_file_path = "../data/en2de.s2s.txt"
valid_file_path = "../data/en2de.s2s.valid.txt"
config_save_path = "../data/default-config.json"
weights_save_path = "../models/weights.{epoch:02d}-{val_loss:.2f}.h5"
init_weights_path = "../models/weights.36-3.04.h5"
src_dict_path = "../data/dict_en.json"
tgt_dict_path = "../data/dict_de.json"
batch_size = 64
epochs = 32
# Data Loader
data_loader = DataLoader(src_dictionary_path=src_dict_path,
tgt_dictionary_path=tgt_dict_path,
batch_size=batch_size)
steps_per_epoch = 28998 // data_loader.batch_size
validation_steps = 1014 // data_loader.batch_size
config = {
"src_vocab_size": data_loader.src_vocab_size,
"tgt_vocab_size": data_loader.tgt_vocab_size,
"model_dim": 512,
"src_max_len": 70,
"tgt_max_len": 70,
"num_layers": 2,
"num_heads": 8,
"ffn_dim": 512,
"dropout": 0.1
}
# Get transformer use config and load weights if exists.
transformer = get_or_create(config,
optimizer=Adam(1e-3, 0.9, 0.98, epsilon=1e-9),
weights_path=init_weights_path)
# save config
save_config(transformer, config_save_path)
ck = ModelCheckpoint(weights_save_path,
save_best_only=True,
save_weights_only=True,
monitor='val_loss',
verbose=0)
log = TensorBoard(log_dir='../logs',
histogram_freq=0,
batch_size=data_loader.batch_size,
write_graph=True,
write_grads=False)
# Use LRFinder to find effective learning rate
lr_finder = LRFinder(1e-6, 1e-2, steps_per_epoch, epochs=3) # => (3e-5, 5e-4)
# lr_scheduler = WatchScheduler(lambda _, lr: lr / 2, min_lr=3e-5, max_lr=5e-4, watch="val_loss", watch_his_len=3)
# lr_scheduler = LRSchedulerPerStep(512)
lr_scheduler = SGDRScheduler(min_lr=4e-5, max_lr=5e-4, steps_per_epoch=steps_per_epoch,
cycle_length=15,
lr_decay=0.8,
mult_factor=1.5)
transformer.model.fit_generator(data_loader.generator(train_file_path),
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=data_loader.generator(valid_file_path),
validation_steps=validation_steps,
callbacks=[ck, log, lr_scheduler])
# lr_finder.plot_loss()
# lr_finder.plot_lr()
|
147832
|
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
# import sys
from time import time
import matplotlib.pyplot as plt
import os
import argparse as ap
# from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.svm import LinearSVC
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
opts = {}
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
opts['print_report'] = True
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
opts['select_chi2'] = 3
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
opts['print_cm'] = True
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
opts['print_top10'] = True
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
opts['all_categories'] = True
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
opts['use_hashing'] = True
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
opts['n_features'] = 2 ** 16
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
opts['filtered'] = False
op.add_option("--dataset",
action="store", type=str, default="dmoz-5",
help="n_features when using the hashing vectorizer.")
opts['dataset'] = 'dmoz-5'
opts = ap.Namespace(**opts)
# (opts, args) = op.parse_args()
# if len(args) > 0:
# op.error("this script takes no arguments.")
# sys.exit(1)
#
# print(__doc__)
# op.print_help()
# print()
###############################################################################
# Load some categories from the training set
root_path = "/Users/yuhui.lin/work/fastText/data/"
if opts.dataset == "dmoz-5":
data_path = os.path.join(root_path, "TFR_5-fast")
num_cats = 5
elif opts.dataset == "dmoz-10":
data_path = os.path.join(root_path, "TFR_10-fast")
num_cats = 10
elif opts.dataset == "ukwa":
data_path = os.path.join(root_path, "TFR_ukwa-fast")
num_cats = 10
else:
raise ValueError(opts.dataset)
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
# data_train = fetch_20newsgroups(subset='train', categories=categories,
# shuffle=True, random_state=42,
# remove=remove)
#
# data_test = fetch_20newsgroups(subset='test', categories=categories,
# shuffle=True, random_state=42,
# remove=remove)
def svm(num_cats, train_data, test_data):
# train_path = os.path.join(data_path, "train")
# test_path = os.path.join(data_path, "test")
data_train = {}
data_train["data"] = []
data_train["target"] = []
data_train["target_names"] = [str(i) for i in range(num_cats)]
data_test = {}
data_test["data"] = []
data_test["target"] = []
# with open(train_path, 'r') as train_f, open(test_path) as test_f:
# train_data = train_f.readlines()
# test_data = test_f.readlines()
for exam in train_data:
data_train["data"].append(exam[12:])
# print(exam)
data_train["target"].append(int(exam[9]))
for exam in test_data:
data_test["data"].append(exam[12:])
data_test["target"].append(int(exam[9]))
data_train = ap.Namespace(**data_train)
data_test = ap.Namespace(**data_test)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print(data_train.data[:10])
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
147849
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.layers import Conv1D, Conv2D, Conv3D, MaxPool1D, \
MaxPool2D, MaxPool3D, Dense, Flatten, Dropout
from mil.errors.custom_exceptions import DimensionError
class MaskedAttentionWeightNorm(tf.keras.layers.Layer):
""" Doing softmax with mask """
def __init__(self, **kwargs):
super(MaskedAttentionWeightNorm, self).__init__(**kwargs)
def call(self, x, mask):
x = tf.exp(x) * mask
soft = x / tf.reduce_sum(x, axis=0)
soft = tf.transpose(soft, [1,0])
return tf.expand_dims(soft, axis=1)
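# Worked example (added for illustration, not part of the original module): for a
# single bag with raw attention scores [2.0, 1.0, 0.0] and mask [1, 1, 0] (the third
# instance is padding), exp(scores) * mask = [7.39, 2.72, 0.0], so the normalized
# weights are roughly [0.73, 0.27, 0.00] -- a softmax restricted to the real instances.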
class AttentionPooling(tf.keras.layers.Layer):
def __init__(self, d, k, **kwargs):
super(AttentionPooling, self).__init__(**kwargs)
self.d = d
self.k = k
def build(self, input_shape):
self.dense_1 = Dense(self.d, activation='tanh')
self.dense_2 = Dense(self.k)
def call(self, x):
x = self.dense_1(x)
x = self.dense_2(x)
return x
class GatedAttentionPooling(tf.keras.layers.Layer):
def __init__(self, d, k, **kwargs):
super(GatedAttentionPooling, self).__init__(**kwargs)
self.d = d
self.k = k
def build(self, input_shape):
self.dense_v = Dense(self.d, activation='tanh')
self.dense_u = Dense(self.d, activation='sigmoid')
self.dense_w = Dense(self.k)
def call(self, x):
a_v = self.dense_v(x)
a_u = self.dense_u(x)
x = self.dense_w(a_v*a_u)
return x
class ConvCustom(tf.keras.layers.Layer):
""" Custom convolution layers, depending on the number of dimensions of the input,
the size and number of filters is hardcoded, can be changed """
def __init__(self, **kwargs):
super(ConvCustom, self).__init__(**kwargs)
def build(self, input_shape):
if len(input_shape) == 2:
self.conv_1 = Dense(100, activation='relu', input_shape=input_shape)
self.max_pool_1 = Dropout(0.2)
self.conv_2 = Dense(50, activation='relu')
self.max_pool_2 = Dropout(0.2)
elif len(input_shape) == 3:
self.conv_1 = Conv1D(20, kernel_size=5, activation='relu', input_shape=input_shape)
self.max_pool_1 = MaxPool1D()
self.conv_2 = Conv1D(50, kernel_size=5, activation='relu')
self.max_pool_2 = MaxPool1D()
elif len(input_shape) == 4:
self.conv_1 = Conv2D(20, kernel_size=5, activation='relu', input_shape=input_shape)
self.max_pool_1 = MaxPool2D()
self.conv_2 = Conv2D(50, kernel_size=5, activation='relu')
self.max_pool_2 = MaxPool2D()
elif len(input_shape) == 5:
self.conv_1 = Conv3D(20, kernel_size=5, activation='relu', input_shape=input_shape)
self.max_pool_1 = MaxPool3D()
self.conv_2 = Conv3D(50, kernel_size=5, activation='relu')
self.max_pool_2 = MaxPool3D()
else:
raise DimensionError("Input shape not covered by this model")
def call(self, x):
x = self.conv_1(x)
x = self.max_pool_1(x)
x = self.conv_2(x)
x = self.max_pool_2(x)
return x
class InstancesRepresentation(tf.keras.layers.Layer):
""" Represent each instance with the embedding """
def __init__(self, l, **kwargs):
super(InstancesRepresentation, self).__init__(**kwargs)
self.l = l
def build(self, input_shape):
self.flatten = Flatten()
self.dense = Dense(self.l, activation='relu')
def call(self, x):
x = self.flatten(x)
x = self.dense(x)
return x
class Masking(tf.keras.layers.Layer):
""" Helper for masking padded instances """
def __init__(self, **kwargs):
super(Masking, self).__init__(**kwargs)
def build(self, input_shape):
self.embedding = tf.keras.layers.Embedding(input_dim=1000, output_dim=1, mask_zero=True)
def call(self, padded_x):
mask = self.embedding.compute_mask(padded_x)
mask = tf.cast(tf.reduce_any(mask, np.arange(2, len(mask.shape))), dtype=tf.float32)
mask = tf.transpose(mask, [1,0])
return mask
class AttentionDeepMil(tf.keras.Model):
def __init__(self, l=500, d=128, k=1, gated=True,**kwargs):
super(AttentionDeepMil, self).__init__(**kwargs)
self.d = d
self.k = k
self.l = l
self.gated = gated
def build(self, input_shape):
self.conv = ConvCustom()
self.inst_repr = InstancesRepresentation(l=self.l)
if self.gated:
    self.att = GatedAttentionPooling(d=self.d, k=self.k)
else:
    self.att = AttentionPooling(d=self.d, k=self.k)
self.att_norm = MaskedAttentionWeightNorm()
self.dense = Dense(1, activation='sigmoid')
self.masking = Masking()
def call(self, padded_x):
mask = self.masking(padded_x)
# reshape to process all instances at once
feat_shape = padded_x.shape[2:]
res = [-1]
for e in feat_shape: res.append(e)
x = tf.reshape(padded_x, res)
#x = tf.reshape(padded_x, [-1,28,28,1])
# conv layer
x = self.conv(x)
ins_rep = self.inst_repr(x)
ins_rep = tf.reshape(ins_rep, [-1, padded_x.shape[1], self.l])
# calculate attention weight norm
att = self.att(ins_rep)
att = tf.transpose(tf.squeeze(att, axis=-1), [1,0])
att = self.att_norm(att, mask)
# multiply weights w/ instance representation. bag representation
m = tf.matmul(att, ins_rep)
m = tf.reshape(m, [-1, self.l])
# classification
y = self.dense(m)
return y, att
def train_step(self, data):
if len(data) == 3:
x, y, sample_weight = data
else:
x, y = data
sample_weight = None
with tf.GradientTape() as tape:
y_pred, _ = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, sample_weight=sample_weight, regularization_losses=self.losses)
# Compute gradients
gradients = tape.gradient(loss, self.trainable_variables)
# Update weights
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
class AttentionDeepPoolingMil(KerasClassifier):
"""
Attention-based deep MIL model wrapped in the Keras classifier class, which
exposes a scikit-learn-like interface.
from paper
Attention-based Deep Multiple Instance Learning (<NAME>, <NAME>, <NAME>)
https://arxiv.org/abs/1802.04712
"""
def __init__(self, gated=True, threshold=0.2, loss='binary_crossentropy', optimizer='adam'):
self.gated = gated
self.loss = loss
self.optimizer = optimizer
self.threshold = threshold
super(AttentionDeepPoolingMil, self).__init__(build_fn=self.build)
self.model = None  # populated by KerasClassifier.fit() via build()
def build(self):
model = AttentionDeepMil(gated=self.gated)
model.compile(optimizer=self.optimizer, loss=self.loss)
return model
def predict(self, X_test, **kwargs):
return self.model.predict(X_test, **kwargs)[0]
def get_positive_instances(self, X, **kwargs):
""" Get instances with greater impact on the bag embedding
Parameters
----------
X : contains the bags to predict the positive instances
threshold : value between 0 and 1. If the weighted sum of instances
representation has more than threshold value for an instance
then the instance is marked as positive.
Returns
-------
pos_ins : a list containing the indexs of the positive instances in X
"""
y_pred, att = self.model.predict(X, **kwargs)
att = att.reshape(len(X), -1)
y_pred = y_pred.reshape(len(X), -1)
pos_ins = tf.where((att > self.threshold) & (y_pred > 0.5))
return pos_ins
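# Hedged usage sketch (added, not part of the original module): a single forward pass
# of the bare AttentionDeepMil model on random bags. The shapes are illustrative
# assumptions only: 8 bags, each padded to 10 instances of 28x28x1.
if __name__ == "__main__":
    bags = tf.random.uniform((8, 10, 28, 28, 1))
    model = AttentionDeepMil(gated=True)
    y, att = model(bags)
    # y holds the per-bag probabilities, att the per-instance attention weights
    print(y.shape, att.shape)  # expected: (8, 1) and (8, 1, 10)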
|
147863
|
import os
import requests
from ..version import version
latest_url = "https://github.com/mazurwiktor/albion-online-stats/releases/latest"
def get_version():
return version
def latest_version():
req = requests.get(latest_url)
if req.status_code == 200:
return os.path.basename(req.url)
return None
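# Note (added for clarity): requests follows the redirect from .../releases/latest to
# .../releases/tag/<tag_name>, so os.path.basename(req.url) yields the newest tag,
# e.g. a string like "v1.2.3" (illustrative value, not a real release).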
def current_version():
return version
|
147885
|
import logging
from kedro.extras.logging import ColorHandler
def test_color_logger(caplog):
log = logging.getLogger(__name__)
for handler in log.handlers:
log.removeHandler(handler) # pragma: no cover
log.addHandler(ColorHandler())
log.info("Test")
for record in caplog.records:
assert record.levelname == "INFO"
assert "Test" in record.msg
|
147888
|
from dataclasses import dataclass
import json
from typing import Dict
@dataclass
class TrainerConfig:
tokenizers: Dict
def __post_init__(self):
pass
@classmethod
def load_from_json(cls, config):
return cls(
tokenizers = config["tokenizers"]
)
@classmethod
def load_from_namespace(cls, config):
pass
@classmethod
def load_from_json_file(cls, config_path):
return cls.load_from_json(json.load(open(config_path)))
|
147920
|
table = {'1985': 'Test movie' , '1983': 'Test Movie2' , '2000': 'Test Movie 3'}
year = '1983'
movie = table[year]
print(movie)
for year in table:
print(year + '\t' + table[year] + '\t')
|
147923
|
from TwitterAPI import TwitterAPI
from tqdm import tqdm
from time import sleep
class Threader(object):
def __init__(self, tweets, api, user=None, wait=None, max_char=280, end_string=True):
"""Create a thread of tweets.
Note that you will need your Twitter API / Application keys for
this to work.
Parameters
----------
tweets : list of strings
The tweets to send out
api : instance of TwitterAPI
An active Twitter API object using the TwitterAPI package.
user : string | None
A user to include in the tweets. If None, no user will be
included.
wait : float | None
The amount of time to wait between tweets. If None, they will
be sent out as soon as possible.
max_char : int
The maximum number of characters allowed per tweet. Threader will
check each string in `tweets` before posting anything, and raise an
error if any string has more characters than max_char.
end_string : bool
Whether to include a thread count at the end of each tweet. E.g.,
"4/" or "5x".
"""
# Check twitter API
if not isinstance(api, TwitterAPI):
raise ValueError('api must be an instance of TwitterAPI')
self.api = api
# Check tweet list
if not isinstance(tweets, list):
raise ValueError('tweets must be a list')
if not all(isinstance(it, str) for it in tweets):
raise ValueError('all items in `tweets` must be a string')
if len(tweets) < 2:
raise ValueError('you must pass two or more tweets')
# Other params
self.user = user
self.wait = wait
self.sent = False
self.end_string = end_string
self.max_char = max_char
# Construct our tweets
self.generate_tweets(tweets)
# Check user existence
if isinstance(user, str):
self._check_user(user)
def _check_user(self, user):
if user is not None:
print('Warning: including users in threaded tweets can get your '
'API token banned. Use at your own risk!')
resp = self.api.request('users/lookup', params={'screen_name': user})
if not isinstance(resp.json(), list):
err = resp.json().get('errors', None)
if err is not None:
raise ValueError('Error in finding username: {}\nError: {}'.format(user, err[0]))
def generate_tweets(self, tweets):
# Set up user ID to which we'll tweet
user = '@{} '.format(self.user) if isinstance(self.user, str) else ''
# Add end threading strings if specified
self._tweets_orig = tweets
self.tweets = []
for ii, tweet in enumerate(tweets):
this_status = '{}{}'.format(user, tweet)
if self.end_string is True:
thread_char = '/' if (ii+1) != len(tweets) else 'x'
end_str = '{}{}'.format(ii + 1, thread_char)
this_status += ' {}'.format(end_str)
else:
this_status = tweet
self.tweets.append(this_status)
if not all(len(tweet) <= int(self.max_char) for tweet in self.tweets):
    raise ValueError("Not all tweets are within {} characters".format(int(self.max_char)))
def send_tweets(self):
"""Send the queued tweets to twitter."""
if self.sent is True:
raise ValueError('Already sent tweets, re-create object in order to send more.')
self.tweet_ids_ = []
self.responses_ = []
self.params_ = []
# Now generate the tweets
for ii, tweet in tqdm(enumerate(self.tweets)):
# Create tweet and add metadata
params = {'status': tweet}
if len(self.tweet_ids_) > 0:
params['in_reply_to_status_id'] = self.tweet_ids_[-1]
# Send POST and get response
resp = self.api.request('statuses/update', params=params)
if 'errors' in resp.json().keys():
raise ValueError('Error in posting tweets:\n{}'.format(
resp.json()['errors'][0]))
self.responses_.append(resp)
self.params_.append(params)
self.tweet_ids_.append(resp.json()['id'])
if isinstance(self.wait, (float, int)):
sleep(self.wait)
self.sent = True
def __repr__(self):
s = ['Threader']
s += ['Tweets', '------']
for tweet in self.tweets:
s += [tweet]
s = '\n'.join(s)
return s
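# Hedged usage sketch (added, not part of the original module): the API keys below are
# placeholders, and send_tweets() would actually post, so that call stays commented out.
if __name__ == "__main__":
    api = TwitterAPI("CONSUMER_KEY", "CONSUMER_SECRET", "ACCESS_TOKEN", "ACCESS_SECRET")
    thread = Threader(["First tweet of the thread.", "Second tweet of the thread."],
                      api, wait=2.0)
    print(thread)  # preview the numbered tweets ("... 1/", "... 2x")
    # thread.send_tweets()  # uncomment to post the thread for real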
|
147969
|
from ..registers import Register, RegisterClass
from ... import ir
class Accumulator(Register):
bitsize = 8
# TODO: hack, this is not a register but a stack position!
class Temporary(Register):
""" On stack temporary """
bitsize = 8
A = Accumulator("A", num=0)
r0 = Temporary("r0", 0)
r1 = Temporary("r1", 1)
r2 = Temporary("r2", 2)
r3 = Temporary("r3", 3)
r4 = Temporary("r4", 4)
Temporary.registers = [r0, r1, r2, r3, r4]
register_classes = [
RegisterClass(
"reg", [ir.i8, ir.u8, ir.ptr], Temporary, Temporary.registers
)
]
|
147982
|
import _io
import torch
import pickle
from torchvision import transforms
from PIL import Image
from ocrd_typegroups_classifier.data.classmap import ClassMap
from ocrd_typegroups_classifier.data.classmap import IndexRemap
class TypegroupsClassifier:
""" Class wrapping type group information and a classifier.
Attributes
----------
classMap: ClassMap
Maps class names to indices corresponding to what the network
outputs.
network: PyTorch network
Classifier
dev: str
Device on which the data must be processed
"""
def __init__(self, groups, network, device=None):
""" Constructor of the class.
Parameters
----------
groups: map string to int
Maps names to IDs with regard to the network outputs;
note that several names can point to the same ID, but
the inverse is not possible.
network:PyTorch network
This network has to have the same interface as the
VRAEC, return three values when being called: the
classification result, a variational loss, and a feature
vector.
device: str
Device on which the data has to be processed; if not set,
then either the cpu or cuda:0 will be used.
"""
self.classMap = ClassMap(groups)
self.network = network
if device is None:
self.dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.dev = device
network.to(self.dev)
@classmethod
def load(cls, input):
""" Loads a type groups classifier from a file
Parameters
----------
input: string or file
File or path to the file from which the instance has to
be loaded.
"""
if type(input) is str:
f = open(input, 'rb')
res = cls.load(f)
f.close()
return res
if not type(input) is _io.BufferedReader:
raise Exception('TypegroupsClassifier.load() requires a string or a file')
res = pickle.load(input)
# If trained with CUDA and loaded on a device without CUDA
res.dev = torch.device(res.dev if torch.cuda.is_available() else "cpu")
res.network.to(res.dev)
return res
def save(self, output):
""" Stores the instance to a file
Parameters
----------
output: string or file
File or path to the file to which the instance has to
be stored.
"""
if type(output) is str:
f = open(output, 'wb')
self.save(f)
f.close()
return
if not type(output) is _io.BufferedWriter:
raise Exception('save() requires a string or a file')
# Moving the network to the cpu so that it can be reloaded on
# machines which do not have CUDA available.
self.network.to("cpu")
pickle.dump(self, output)
self.network.to(self.dev)
def filter(self, sample, label):
""" Removes data with unknown type groups
Parameters
----------
sample: PyTorch tensor
Tensor of inputs for the network
label: PyTorch tensor
Tensor of class IDs, the unknown ones being set to -1
Returns
-------
sample, label
The input tensors without the ones having a -1 label
"""
selection = label!=-1
return sample[selection], label[selection]
def run(self, pil_image, stride, batch_size=32, score_as_key=False):
return self.classify(pil_image, stride, batch_size, score_as_key)
def classify(self, pil_image, stride, batch_size, score_as_key=False, max_width=1000):
""" Classifies a PIL image, returning a map with class names and
corresponding scores.
Parameters
----------
pil_image: PIL image
Image to classify
stride: int
The CNN is applied patch-wise; this parameter
corresponds to the offset between two patches
batch_size: int
Number of patches which can be processed at the same
time by the hardware. If no GPU is used, then a
value of 1 is fine.
score_as_key: bool
Use scores, instead of class names, as key for the
result map.
Returns
-------
A map between class names and scores, or scores and
class names, depending on whether score_as_key is true
or false.
"""
if pil_image.size[0]>max_width:
pil_image = pil_image.resize((max_width, round(pil_image.size[1]*float(max_width)/pil_image.size[0])), Image.BILINEAR)
crop_size = min(224, pil_image.size[0])
crop_size = min(crop_size, pil_image.size[1])
tensorize = transforms.ToTensor()
was_training = self.network.training
self.network.eval()
with torch.no_grad():
score = 0
processed_samples = 0
batch = []
for x in range(0, pil_image.size[0], stride):
for y in range(0, pil_image.size[1], stride):
crop = tensorize(pil_image.crop((x, y, x+crop_size, y+crop_size)))
batch.append(crop)
if len(batch) >= batch_size:
tensors = torch.stack(batch).to(self.dev)
out = self.network(tensors)
score += out.sum(0)
processed_samples += len(batch)
batch = []
if batch:
tensors = torch.stack(batch).to(self.dev)
out = self.network(tensors)
score += out.sum(0)
processed_samples += len(batch)
batch = []
if was_training:
self.network.train()
score /= processed_samples
res = {}
for cl in self.classMap.cl2id:
cid = self.classMap.cl2id[cl]
if cid == -1:
continue
res[cl] = score[cid].item()
if score_as_key:
res = {s: c for c, s in res.items()}
return res
def __repr__(self):
""" returns a string description of the instance """
format_string = self.__class__.__name__ + '('
format_string += '\n ClassMap: %s' % self.classMap
format_string += '\n Network:'
if self.network is None:
format_string += '\n None'
else:
format_string += '\n%s\nEnd of network\n' % self.network
return format_string+'\n)'
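# Hedged usage sketch (added, not part of the original module): the model and image
# paths are hypothetical. classify() slides crops of up to 224x224 over the page with
# the given stride and averages the network scores over all patches.
if __name__ == "__main__":
    tgc = TypegroupsClassifier.load('models/classifier.tgc')
    page = Image.open('data/sample_page.jpg')
    print(tgc.classify(page, stride=112, batch_size=8))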
|
148001
|
import models.local_model as model
import models.dataloader as dataloader
from models import training
import argparse
import torch
import config.config_loader as cfg_loader
parser = argparse.ArgumentParser(
description='Train Model'
)
parser.add_argument('config', type=str, help='Path to config file.')
args = parser.parse_args()
cfg = cfg_loader.load(args.config)
net = model.get_models()[cfg['model']]()
train_dataset = dataloader.VoxelizedDataset('train', cfg)
val_dataset = dataloader.VoxelizedDataset('val', cfg)
trainer = training.Trainer(net,torch.device("cuda"),train_dataset, val_dataset, cfg['folder_name'], optimizer=cfg['training']['optimizer'])
trainer.train_model(1500)
|
148008
|
from contextlib import suppress
from aiogram import Dispatcher, types
from aiogram.dispatcher.filters import IsReplyFilter, IDFilter
from bot.blocklists import banned, shadowbanned
from bot.handlers.adminmode import extract_id
async def cmd_ban(message: types.Message):
try:
user_id = extract_id(message)
except ValueError as ex:
return await message.reply(str(ex))
banned.add(int(user_id))
await message.reply(
f"ID {user_id} добавлен в список заблокированных. "
f"При попытке отправить сообщение пользователь получит уведомление о том, что заблокирован."
)
async def cmd_shadowban(message: types.Message):
try:
user_id = extract_id(message)
except ValueError as ex:
return await message.reply(str(ex))
shadowbanned.add(int(user_id))
await message.reply(
f"ID {user_id} добавлен в список скрытно заблокированных. "
f"При попытке отправить сообщение пользователь не узнает, что заблокирован."
)
async def cmd_unban(message: types.Message):
try:
user_id = extract_id(message)
except ValueError as ex:
return await message.reply(str(ex))
user_id = int(user_id)
with suppress(KeyError):
banned.remove(user_id)
with suppress(KeyError):
shadowbanned.remove(user_id)
await message.reply(f"ID {user_id} разблокирован")
async def cmd_list_banned(message: types.Message):
has_bans = len(banned) > 0 or len(shadowbanned) > 0
if not has_bans:
await message.answer("Нет заблокированных пользователей")
return
result = []
if len(banned) > 0:
result.append("Список заблокированных:")
for item in banned:
result.append(f"• #id{item}")
if len(shadowbanned) > 0:
result.append("\nСписок скрытно заблокированных:")
for item in shadowbanned:
result.append(f"• #id{item}")
await message.answer("\n".join(result))
def register_bans_handlers(dp: Dispatcher, admin_chat_id: int):
dp.register_message_handler(cmd_ban, IsReplyFilter(is_reply=True), IDFilter(chat_id=admin_chat_id),
commands="ban")
dp.register_message_handler(cmd_shadowban, IsReplyFilter(is_reply=True), IDFilter(chat_id=admin_chat_id),
commands="shadowban")
dp.register_message_handler(cmd_unban, IsReplyFilter(is_reply=True), IDFilter(chat_id=admin_chat_id),
commands="unban")
dp.register_message_handler(cmd_list_banned, IDFilter(chat_id=admin_chat_id),
commands="list_banned")
|
148065
|
import os
import unittest
from BaseSpacePy.api.BaseMountInterface import BaseMountInterface, BaseMountInterfaceException
def get_basemount_root():
import getpass
username = getpass.getuser()
config_name = "hoth"
basemount_root = "/basespace"
basemount_target = "%s.%s" % (username, config_name)
return os.path.join(basemount_root, basemount_target)
basemount_root = get_basemount_root()
project_path = os.path.join(basemount_root, "Projects", "BaseSpaceDemo")
project_id = '596596'
sample_path = os.path.join(project_path, "Samples", "BC_1")
sample_id = '855855'
class TestBaseMountInterface(unittest.TestCase):
def setUp(self):
pass
def test_fail_on_invalid_path(self):
with self.assertRaises(BaseMountInterfaceException):
BaseMountInterface("/tmp")
def test_extract_project_details(self):
bmi = BaseMountInterface(project_path)
self.assertEqual(bmi.type, "project")
self.assertEqual(bmi.id, project_id)
def test_extract_sample_details(self):
bmi = BaseMountInterface(sample_path)
self.assertEqual(bmi.type, "sample")
self.assertEqual(bmi.id, sample_id)
if __name__ == "__main__":
unittest.main()
|
148096
|
import sys
from timeit import timeit
import matplotlib.pyplot as plt
import numpy as np
# from numba import jit, njit
# from numba.typed import List
from statistics import mean
import ray
from multiprocessing import Pool
# Silences Numba warnings about a Python list being passed into a numba function
import warnings
warnings.filterwarnings('ignore')
#### OPTIONS ####
testCppFunctions = False #- set up for PyBind11 - only installed on one of my computers
testJuliaFunctions = True # - excessively slow - only installed on one of my computers
testCython = False
nTests=50
maxArraySize=1000
step=50
arrayLengths = list(range(1, maxArraySize, step))
#### END OPTIONS ####
#### Functions to test ####
def createPythonList(length):
a = list(range(length))
return [ float(x) for x in a ]
def createNumpyArray(length):
pyList = createPythonList(length)
return np.array(pyList, dtype=np.float64)
# def createNumbaTypedList(length):
# pyList = createPythonList(length)
# typedList = List()
# [ typedList.append(x) for x in pyList ]
# return typedList
def addFive_Python(array):
return [ x+5.0 for x in array ]
@ray.remote
def addFive_Python_RaySub(array):
return [ x+5.0 for x in array ]
def addFive_Python_Ray(array):
chunk = round(len(array)/2)
future1 = addFive_Python_RaySub.remote(array[:chunk])
future2 = addFive_Python_RaySub.remote(array[chunk:])
return ray.get(future1) + ray.get(future2)
def addFive_Python_Multiprocessing(array):
chunk = round(len(array)/2)
with Pool(2) as p:
results = p.map(addFive_Python, [ array[:chunk], array[chunk:]])
return results[0] + results[1]
# @njit()
def addFive_Numba(array):
for i in range(len(array)):
array[i] += 5.0
return array
def addFive_Numpy(array):
array += 5.0
return array
# Assume the Python function will be imported from this file
pythonBenchmarkSetupString = """
from addScalarToArray import {}, {}
pyList = {}({})
"""
# Assume Comparison function list generator will be imported from this file
# Comparison function can be imported from any module
comparisonBenchmarkSetupString = """
from addScalarToArray import {}
from {} import {}
comparisonArray = {}({})
# Call function once to pre-compile it for JIT methods like numba
{}(comparisonArray)
"""
#### Functions that do the testing / result plotting ####
def getSpeedup(length, comparisonFunction, comparisonFunctionListGenerator, comparisonFnModule, pythonFunction, pythonListGenerator):
setupPython = pythonBenchmarkSetupString.format(pythonFunction, pythonListGenerator, pythonListGenerator, length)
setupComparison = comparisonBenchmarkSetupString.format(comparisonFunctionListGenerator, comparisonFnModule, comparisonFunction.__name__, comparisonFunctionListGenerator, length, comparisonFunction.__name__)
pythonTime = timeit("{}(pyList)".format(pythonFunction), setup=setupPython, number=nTests)
fnTime = timeit("{}(comparisonArray)".format(comparisonFunction.__name__), setup=setupComparison, number=nTests)
return pythonTime/fnTime
def plotSpeedupForEachArrayLength(function, label="Unlabelled", comparisonFunctionListGenerator="createNumpyArray", comparisonFnModule="addScalarToArray", pythonFunction="addFive_Python", pythonListGenerator="createPythonList"):
ratios = [ getSpeedup(l, function, comparisonFunctionListGenerator, comparisonFnModule, pythonFunction, pythonListGenerator) for l in arrayLengths ]
print("Speedup {:<40}: {:>6.2f}, {:>6.2f}, {:>6.2f}".format("("+label+")", min(ratios), max(ratios), mean(ratios)))
# Plot result, with different line styles depending on which data type is being operated on
if "ndarray" in label:
plt.plot(arrayLengths, ratios, linestyle="dashed", label=label)
elif "numba.typed.List" in label:
plt.plot(arrayLengths, ratios, linestyle="dotted", label=label)
else:
plt.plot(arrayLengths, ratios, label=label)
if testJuliaFunctions:
import julia
from julia import Main
Main.include("addScalar.jl")
# function to test is Main.addFive - seems very slow, must be converting types or not being compiled
addFive_Julia = Main.addFive_Julia
#### Main ####
if __name__ == "__main__":
print("Each operation performed {} times".format(nTests))
print("Speedup {:<40}: {:>6}, {:>6}, {:>6}".format("", "Min", "Max", "Mean"))
plotSpeedupForEachArrayLength(addFive_Python, label="Python loop: ndarray")
plotSpeedupForEachArrayLength(addFive_Python, label="Python loop: list", comparisonFunctionListGenerator="createPythonList")
plotSpeedupForEachArrayLength(addFive_Numpy, label="numpy +=: ndarray")
# plotSpeedupForEachArrayLength(addFive_Numba, label="numba: list", comparisonFunctionListGenerator="createPythonList")
# plotSpeedupForEachArrayLength(addFive_Numba, label="numba: numba.typed.List", comparisonFunctionListGenerator="createNumbaTypedList")
# plotSpeedupForEachArrayLength(addFive_Numba, label="numba: ndarray")
if testCython:
# Must have compiled the 'addScalarCython.pyx' file on your machine using Cython to make this work
# Run `cythonize addScalar.pyx`
# https://cython.readthedocs.io/en/latest/MAPLEAF/tutorial/cython_tutorial.html
import addScalarCython
plotSpeedupForEachArrayLength(addScalarCython.addFive_Numpy, comparisonFnModule="addScalarCython", label="Cython - strongly-typed: ndarray")
plotSpeedupForEachArrayLength(addScalarCython.addFive_Plain, comparisonFnModule="addScalarCython", label="Cython - Plain Python: list", comparisonFunctionListGenerator="createPythonList")
plotSpeedupForEachArrayLength(addScalarCython.addFive_Plain, comparisonFnModule="addScalarCython", label="Cython - Plain Python: ndarray")
# Too slow
# plotSpeedupForEachArrayLength(addFive_Python_Multiprocessing, label="Multiprocessing: 2 cores")
# Also very slow, but less so because processes are only launched once
# ray.init()
# plotSpeedupForEachArrayLength(addFive_Python_Ray, label="Ray: 2 cores")
if testCppFunctions:
import example
#Functions to test are:
example.addToList_Cpp # - (converts to std::vector and back)
# example.addFive(nV1) # - (no conversion, loops in C++)
example.vectorizedAddFive #- (C++ function wrapped with py::vectorize to work on an array)
if testJuliaFunctions:
plotSpeedupForEachArrayLength(Main.addFive_Julia, label="Julia, ndarray")
plotSpeedupForEachArrayLength(Main.addFive_Julia, label="Julia, list", comparisonFunctionListGenerator="createPythonList")
plt.xlabel("Array size")
plt.ylabel("Speedup")
plt.title("Elementwise adding a scalar to an array of float64")
plt.autoscale(tight=True, axis="y")
plt.xlim([0, maxArraySize])
# plt.yscale("log")
plt.legend()
plt.show()
|
148112
|
class EnergyAnalysisDetailModelOptions(object,IDisposable):
"""
Options that govern the calculations for the generation of the energy analysis detail model.
EnergyAnalysisDetailModelOptions()
"""
def Dispose(self):
""" Dispose(self: EnergyAnalysisDetailModelOptions) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: EnergyAnalysisDetailModelOptions,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
EnergyModelType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""It indicates whether the energy model is based on rooms/spaces or building elements.
Get: EnergyModelType(self: EnergyAnalysisDetailModelOptions) -> EnergyModelType
Set: EnergyModelType(self: EnergyAnalysisDetailModelOptions)=value
"""
ExportMullions=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates if to specify the setting for exporting mullions.
Get: ExportMullions(self: EnergyAnalysisDetailModelOptions) -> bool
Set: ExportMullions(self: EnergyAnalysisDetailModelOptions)=value
"""
IncludeShadingSurfaces=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates if to set and get the setting for if shading surfaces should be included.
Get: IncludeShadingSurfaces(self: EnergyAnalysisDetailModelOptions) -> bool
Set: IncludeShadingSurfaces(self: EnergyAnalysisDetailModelOptions)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: EnergyAnalysisDetailModelOptions) -> bool
"""
SimplifyCurtainSystems=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates if to specify the setting for simplified curtain systems.
Get: SimplifyCurtainSystems(self: EnergyAnalysisDetailModelOptions) -> bool
Set: SimplifyCurtainSystems(self: EnergyAnalysisDetailModelOptions)=value
"""
Tier=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Level of computation for energy analysis model.
Get: Tier(self: EnergyAnalysisDetailModelOptions) -> EnergyAnalysisDetailModelTier
Set: Tier(self: EnergyAnalysisDetailModelOptions)=value
"""
|
148113
|
import os
import os.path as osp
import subprocess
import time
import sys
import argparse
import numpy as np
from sklearn.metrics import roc_auc_score
LSGKM_DIR = osp.join("baselines", "lsgkm-master/src")
LSGKM_TRAIN = osp.join(LSGKM_DIR, "gkmtrain")
LSGKM_PREDICT = osp.join(LSGKM_DIR, "gkmpredict")
def get_args():
parser = argparse.ArgumentParser(description="Analyze lsgkm results data")
parser.add_argument(
"--data_dir",
type=str,
default="./baselines/gkm_data",
help="Dataset directory",
metavar="DATA_DIR",
)
parser.add_argument(
"--prefix", type=str, required=True, help="Dataset prefix", metavar="PREFIX"
)
parser.add_argument(
"--outdir",
type=str,
default="./temp",
metavar="NAME",
help="Directory to store intermediate and output files",
)
parser.add_argument("-g", type=int, required=True)
parser.add_argument("-m", type=int, required=True)
parser.add_argument("-T", type=int, required=True)
parser.add_argument(
"--dict",
type=str,
required=False,
help="Dictionary file name (not needed for DNA datasets)",
)
return parser.parse_args()
def read_preds(file):
preds = []
with open(file, "r") as f:
for line in f:
line = line.split()
assert len(line) == 2
preds.append(float(line[1]))
return preds
def get_accuracy(pos_preds, neg_preds):
accuracy = 0
num_correct = 0
num_pred = len(pos_preds) + len(neg_preds)
for pred in pos_preds:
if pred > 0:
num_correct += 1
for pred in neg_preds:
if pred <= 0:
num_correct += 1
return num_correct / num_pred
def get_auc(pos_preds, neg_preds):
ytrue = [1 for _ in pos_preds] + [-1 for _ in neg_preds]
yscore = [score for score in pos_preds] + [score for score in neg_preds]
auc = roc_auc_score(ytrue, yscore)
return auc
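# Worked example (added for illustration): with pos_preds = [1.2, -0.3] and
# neg_preds = [-0.8, 0.4], two of the four signs are correct, so get_accuracy()
# returns 0.5, while get_auc() sees 3 of the 4 positive/negative pairs correctly
# ranked and returns 0.75.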
args = get_args()
g, m, T = args.g, args.m, args.T
k = g - m
# input files
dir, prefix = args.data_dir, args.prefix
train_pos_file = osp.join(dir, prefix + ".train.pos.fasta")
train_neg_file = osp.join(dir, prefix + ".train.neg.fasta")
test_pos_file = osp.join(dir, prefix + ".test.pos.fasta")
test_neg_file = osp.join(dir, prefix + ".test.neg.fasta")
# output files
outdir = args.outdir
if not osp.exists(outdir):
os.makedirs(outdir)
svm_file_prefix = osp.join(outdir, "svmtrain")
svmtrain = svm_file_prefix + ".model.txt"
pos_pred_file = osp.join(outdir, prefix + ".preds.pos.out")
neg_pred_file = osp.join(outdir, prefix + ".preds.neg.out")
### train SVM ###
print("Training model...")
command = [LSGKM_TRAIN, "-t", str(2), "-l", str(g), "-k", str(k), "-d", str(m), "-T", str(T), "-R"]
command += [train_pos_file, train_neg_file, svm_file_prefix]
print(" ".join(command))
output = subprocess.check_output(command)
### test ###
print("Getting predictions...")
# get pos preds
command = [LSGKM_PREDICT, "-v", str(0), "-T", str(T)]
command += [test_pos_file, svmtrain, pos_pred_file]
print(" ".join(command))
subprocess.check_output(command)
# get neg preds
command = [LSGKM_PREDICT, "-v", str(0), "-T", str(T)]
command += [test_neg_file, svmtrain, neg_pred_file]
print(" ".join(command))
subprocess.check_output(command)
### evaluate ###
pos_preds = read_preds(pos_pred_file)
neg_preds = read_preds(neg_pred_file)
print("Computing accuracy...")
accuracy = get_accuracy(pos_preds, neg_preds)
print("Computing AUC...")
auc = get_auc(pos_preds, neg_preds)
print("Accuracy = {}, AUC = {}".format(accuracy, auc))
|
148123
|
import tensorflow as tf
weight = tf.Variable(1.0,name="weight")
input_value = tf.constant(0.5,name="input_value")
expected_output = tf.constant(0.0,name="expected_output")
model = tf.multiply(input_value,weight,"model")
loss_function = tf.pow(expected_output - model,2,name="loss_function")
optimizer = tf.train.GradientDescentOptimizer(0.025).minimize(loss_function)
for value in [input_value,weight,expected_output,model,loss_function]:
tf.summary.scalar(value.op.name,value)
summaries = tf.summary.merge_all()
sess = tf.Session()
summary_writer = tf.summary.FileWriter('log_simple_stats',sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(100):
summary_writer.add_summary(sess.run(summaries),i)
sess.run(optimizer)
|
148165
|
import clawpack.pyclaw
class State(clawpack.pyclaw.State):
"""Parallel State class"""
__doc__ += clawpack.pyclaw.util.add_parent_doc(clawpack.pyclaw.state)
@property
def num_eqn(self):
r"""(int) - Number of unknowns (components of q)"""
if self.q_da is None:
raise Exception('state.num_eqn has not been set.')
else: return self.q_da.dof
@property
def num_aux(self):
r"""(int) - Number of auxiliary fields"""
if self.aux_da is None: return 0
else: return self.aux_da.dof
@property
def mp(self):
r"""(int) - Number of derived quantities (components of p)"""
if self._p_da is None:
raise Exception('state.mp has not been set.')
else: return self._p_da.dof
@mp.setter
def mp(self,mp):
if self._p_da is not None:
raise Exception('You cannot change state.mp after p is initialized.')
else:
self._p_da = self._create_DA(mp)
self.gpVec = self._p_da.createGlobalVector()
@property
def mF(self):
r"""(int) - Number of derived quantities (components of p)"""
if self._F_da is None:
raise Exception('state.mF has not been set.')
else: return self._F_da.dof
@mF.setter
def mF(self,mF):
if self._F_da is not None:
raise Exception('You cannot change state.mF after F is initialized.')
else:
self._F_da = self._create_DA(mF)
self.gFVec = self._F_da.createGlobalVector()
@property
def q(self):
r"""
Array of solution values.
"""
shape = self.grid.num_cells
shape.insert(0,self.num_eqn)
return self.gqVec.getArray().reshape(shape, order = 'F')
@q.setter
def q(self,val):
self.gqVec.setArray(val.reshape([-1], order = 'F'))
@property
def p(self):
r"""
Array containing values of derived quantities for output.
"""
if self._p_da is None: return 0
shape = self.grid.num_cells
shape.insert(0,self.mp)
p=self.gpVec.getArray().reshape(shape, order = 'F')
return p
@p.setter
def p(self,val):
mp = val.shape[0]
if self.gpVec is None: self.init_p_da(mp)
self.gpVec.setArray(val.reshape([-1], order = 'F'))
@property
def F(self):
r"""
Array containing pointwise values (densities) of output functionals.
This is just used as temporary workspace before summing.
"""
if self._F_da is None: return 0
shape = self.grid.num_cells
shape.insert(0,self.mF)
F=self.gFVec.getArray().reshape(shape, order = 'F')
return F
@F.setter
def F(self,val):
mF = val.shape[0]
if self.gFVec is None: self.init_F_da(mF)
self.gFVec.setArray(val.reshape([-1], order = 'F'))
@property
def aux(self):
"""
We never communicate aux values; every processor should set its own ghost cell
values for the aux array. The global aux vector is used only for outputting
the aux values to file; everywhere else we use the local vector.
"""
if self.aux_da is None: return None
shape = self.grid.num_cells
shape.insert(0,self.num_aux)
aux=self.gauxVec.getArray().reshape(shape, order = 'F')
return aux
@aux.setter
def aux(self,val):
# It would be nice to make this work also for parallel
# loading from a file.
if self.aux_da is None:
num_aux=val.shape[0]
self._init_aux_da(num_aux)
self.gauxVec.setArray(val.reshape([-1], order = 'F'))
@property
def num_dim(self):
return self.patch.num_dim
def __init__(self,geom,num_eqn,num_aux=0):
r"""
Here we don't call super because q and aux must be properties in PetClaw
but should not be properties in PyClaw.
:attributes:
patch - The patch this state lives on
"""
from clawpack.pyclaw import geometry
if isinstance(geom,geometry.Patch):
self.patch = geom
elif isinstance(geom,geometry.Domain):
self.patch = geom.patches[0]
else:
raise Exception("""A PetClaw State object must be initialized with
a PetClaw Patch or Domain object.""")
self.aux_da = None
self.q_da = None
self._p_da = None
self.gpVec = None
self._F_da = None
self.gFVec = None
# ========== Attribute Definitions ===================================
self.problem_data = {}
r"""(dict) - Dictionary of global values for this patch,
``default = {}``"""
self.t=0.
r"""(float) - Current time represented on this patch,
``default = 0.0``"""
self.index_capa = -1
self.keep_gauges = False
r"""(bool) - Keep gauge values in memory for every time step,
``default = False``"""
self.gauge_data = []
r"""(list) - List of numpy.ndarray objects. Each element of the list
stores the values of the corresponding gauge if ``keep_gauges`` is set
to ``True``"""
self._init_q_da(num_eqn)
if num_aux>0: self._init_aux_da(num_aux)
def _init_aux_da(self,num_aux,num_ghost=0):
r"""
Initializes PETSc DA and global & local Vectors for handling the
auxiliary array, aux.
Initializes aux_da, gauxVec and _aux_local_vector.
"""
self.aux_da = self._create_DA(num_aux,num_ghost)
self.gauxVec = self.aux_da.createGlobalVector()
self._aux_local_vector = self.aux_da.createLocalVector()
def _init_q_da(self,num_eqn,num_ghost=0):
r"""
Initializes PETSc DA and Vecs for handling the solution, q.
Initializes q_da, gqVec and _q_local_vector.
"""
self.q_da = self._create_DA(num_eqn,num_ghost)
self.gqVec = self.q_da.createGlobalVector()
self._q_local_vector = self.q_da.createLocalVector()
def _create_DA(self,dof,num_ghost=0):
r"""Returns a PETSc DA and associated global Vec.
Note that no local vector is returned.
"""
from petsc4py import PETSc
#Due to the way PETSc works, we just make the patch always periodic,
#regardless of the boundary conditions actually selected.
#This works because in solver.qbc() we first call globalToLocal()
#and then impose the real boundary conditions (if non-periodic).
if hasattr(PETSc.DA, 'PeriodicType'):
if self.num_dim == 1:
periodic_type = PETSc.DA.PeriodicType.X
elif self.num_dim == 2:
periodic_type = PETSc.DA.PeriodicType.XY
elif self.num_dim == 3:
periodic_type = PETSc.DA.PeriodicType.XYZ
else:
raise Exception("Invalid number of dimensions")
DA = PETSc.DA().create(dim=self.num_dim,
dof=dof,
sizes=self.patch.num_cells_global,
periodic_type = periodic_type,
stencil_width=num_ghost,
comm=PETSc.COMM_WORLD)
else:
DA = PETSc.DA().create(dim=self.num_dim,
dof=dof,
sizes=self.patch.num_cells_global,
boundary_type = PETSc.DA.BoundaryType.PERIODIC,
stencil_width=num_ghost,
comm=PETSc.COMM_WORLD)
return DA
def get_qbc_from_q(self,num_ghost,qbc):
"""
Returns q with ghost cells attached, by accessing the local vector.
"""
shape = [n + 2*num_ghost for n in self.grid.num_cells]
self.q_da.globalToLocal(self.gqVec, self._q_local_vector)
shape.insert(0,self.num_eqn)
return self._q_local_vector.getArray().reshape(shape, order = 'F')
def get_auxbc_from_aux(self,num_ghost,auxbc):
"""
Returns aux with ghost cells attached, by accessing the local vector.
"""
shape = [n + 2*num_ghost for n in self.grid.num_cells]
self.aux_da.globalToLocal(self.gauxVec, self._aux_local_vector)
shape.insert(0,self.num_aux)
return self._aux_local_vector.getArray().reshape(shape, order = 'F')
def set_num_ghost(self,num_ghost):
r"""
This is a hack to deal with the fact that petsc4py
doesn't allow us to change the stencil_width (num_ghost).
Instead, we initially create DAs with stencil_width=0.
Then, in solver.setup(), we call this function to replace
those DAs with new ones that have the right stencil width.
This could be made more efficient using some PETSc calls,
but it only happens once so it seems not to be worth it.
"""
q0 = self.q.copy()
self._init_q_da(self.num_eqn,num_ghost)
self.q = q0
if self.aux is not None:
aux0 = self.aux.copy()
self._init_aux_da(self.num_aux,num_ghost)
self.aux = aux0
def sum_F(self,i):
return self.gFVec.strideNorm(i,0)
def get_q_global(self):
r"""
Returns a copy of the global q array on process 0, otherwise returns None
"""
from petsc4py import PETSc
q_natural = self.q_da.createNaturalVec()
self.q_da.globalToNatural(self.gqVec, q_natural)
scatter, q0Vec = PETSc.Scatter.toZero(q_natural)
scatter.scatter(q_natural, q0Vec, False, PETSc.Scatter.Mode.FORWARD)
rank = PETSc.COMM_WORLD.getRank()
if rank == 0:
shape = self.patch.num_cells_global
shape.insert(0,self.num_eqn)
q0=q0Vec.getArray().reshape(shape, order = 'F').copy()
else:
q0=None
scatter.destroy()
q0Vec.destroy()
return q0
def get_aux_global(self):
r"""
Returns a copy of the global aux array on process 0, otherwise returns None
"""
from petsc4py import PETSc
aux_natural = self.aux_da.createNaturalVec()
self.aux_da.globalToNatural(self.gauxVec, aux_natural)
scatter, aux0Vec = PETSc.Scatter.toZero(aux_natural)
scatter.scatter(aux_natural, aux0Vec, False, PETSc.Scatter.Mode.FORWARD)
rank = PETSc.COMM_WORLD.getRank()
if rank == 0:
shape = self.patch.num_cells_global
shape.insert(0,self.num_aux)
aux0=aux0Vec.getArray().reshape(shape, order = 'F').copy()
else:
aux0=None
scatter.destroy()
aux0Vec.destroy()
return aux0
def __deepcopy__(self,memo={}):
r"""
Calls the pyclaw deepcopy function, but also copies the number of ghost cells
"""
result = super(State,self).__deepcopy__(memo)
result.set_num_ghost(self.q_da.stencil_width)
return result
|
148174
|
import pynetbox
import os
import ipaddress
netbox_token = os.getenv("NETBOX_TOKEN")
netbox_url = os.getenv("NETBOX_URL")
site_name = os.getenv("NETBOX_SITE")
tenant_name = os.getenv("NETBOX_TENANT")
netbox = pynetbox.api(netbox_url, token=netbox_token)
tenant = netbox.tenancy.tenants.get(name=tenant_name)
mgmt_tenant = netbox.tenancy.tenants.get(name="Management")
site = netbox.dcim.sites.get(name=site_name)
prod_vlan_group = netbox.ipam.vlan_groups.get(
site_id=site.id, name="Production"
)
# Get devices for site
devices = netbox.dcim.devices.filter(site_id=site.id, tenant_id=tenant.id)
# Fill in details
for device in devices:
device.interfaces = netbox.dcim.interfaces.filter(device_id=device.id)
for interface in device.interfaces:
interface.ip_addresses = netbox.ipam.ip_addresses.filter(
interface_id=interface.id
)
for ip_address in interface.ip_addresses:
ip_address.ip = ipaddress.ip_address(
ip_address.address.split("/")[0]
)
ip_address.network = ipaddress.ip_network(
ip_address.address, strict=False
)
# Get VLAN Info from Netbox
vlans = netbox.ipam.vlans.filter(site_id=site.id, group_id=prod_vlan_group.id)
# Retrieve Prefixes for VLANs
for vlan in vlans:
try:
vlan.prefix = netbox.ipam.prefixes.get(vlan_id=vlan.id)
except Exception as e:
print(e)
# print("VLAN ID: {} Name {}".format(vlan.vid, vlan.name))
|
148175
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.animals import animals
def test_animals():
"""Test module animals.py by downloading
animals.csv and testing shape of
extracted data has 20 rows and 6 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = animals(test_path)
try:
assert x_train.shape == (20, 6)
except:
shutil.rmtree(test_path)
raise
|
148262
|
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, **kwargs):
super(Model, self).__init__()
self.image_size = kwargs['IMAGE_SIZE']
self.patch_keys = kwargs['PATCH_KEYS']
self.target_keys = kwargs['TARGET_KEYS']
self.layer1 = nn.Sequential(nn.Conv2d(2, 64, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(64))
self.layer2 = nn.Sequential(nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(64), nn.MaxPool2d(2))
self.layer3 = nn.Sequential(nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(64))
self.layer4 = nn.Sequential(nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(64), nn.MaxPool2d(2))
self.layer5 = nn.Sequential(nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128))
self.layer6 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128), nn.MaxPool2d(2))
self.layer7 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128))
if self.image_size == 128:
self.layer8 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128))
elif self.image_size == 512:
self.layer8 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128),
nn.MaxPool2d(2))
self.layer9 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128))
self.layer10 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128),
nn.MaxPool2d(2))
self.layer11 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128))
self.layer12 = nn.Sequential(nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(), nn.BatchNorm2d(128))
self.fc1 = nn.Sequential(nn.Linear(128 * 16 * 16, 1024), nn.ReLU())
self.fc2 = nn.Linear(1024, 8)
def _forward(self, x):
# Forward
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = self.layer6(out)
out = self.layer7(out)
out = self.layer8(out)
if self.image_size == 512:
out = self.layer9(out)
out = self.layer10(out)
out = self.layer11(out)
out = self.layer12(out)
out = out.view(-1, 128 * 16 * 16)
out = self.fc1(out)
out = self.fc2(out)
return out.reshape(-1, 4, 2)
def forward(self, data):
(e1, e2) = self.patch_keys
o = self.target_keys[0]
p1 = data[e1]
p2 = data[e2]
        x = torch.cat([p1, p2], dim=1)
data[o] = self._forward(x)
return data
def predict_homography(self, data):
return self.forward(data)
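# --- Illustrative usage (a minimal sketch; the key names, batch size and
# one-channel patch shape are assumptions, not part of the original module) ---
if __name__ == "__main__":
    model = Model(IMAGE_SIZE=128,
                  PATCH_KEYS=("patch_a", "patch_b"),
                  TARGET_KEYS=("pred_corners",))
    data = {
        "patch_a": torch.randn(4, 1, 128, 128),  # concatenated to 2 input channels
        "patch_b": torch.randn(4, 1, 128, 128),
    }
    out = model(data)
    print(out["pred_corners"].shape)  # torch.Size([4, 4, 2])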
|
148264
|
import typing as t
from pathlib import Path
import yaml
def get_resources(path: Path) -> t.List[t.Dict]:
"""Loads resource YAMLs from provided path."""
resources = []
for item in path.iterdir():
if item.is_file() and item.suffix == ".yaml" and item.name != "_category_info.yaml":
resources.append(yaml.safe_load(item.read_text()))
return resources
def get_subcategories(path: Path) -> t.List[t.Dict]:
"""Loads resources subcategories with their resources by provided path."""
subcategories = []
for item in path.iterdir():
if item.is_dir() and item.joinpath("_category_info.yaml").exists():
subcategories.append({
"category_info": {
**yaml.safe_load(
item.joinpath("_category_info.yaml").read_text()
),
"raw_name": item.name
},
"resources": [
yaml.safe_load(subitem.read_text())
for subitem in item.iterdir()
if (
subitem.is_file()
and subitem.suffix == ".yaml"
and subitem.name != "_category_info.yaml"
)
]
})
return subcategories
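# --- Illustrative usage (a minimal sketch; the "resources" directory layout
# is an assumption, not part of the original module) ---
if __name__ == "__main__":
    base = Path("resources")
    for resource in get_resources(base):
        print(resource)
    for subcategory in get_subcategories(base):
        print(subcategory["category_info"]["raw_name"], len(subcategory["resources"]))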
|
148270
|
import pandas as pd
import toolz
import ibis.common.exceptions as com
import ibis.config
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis.backends.base import BaseBackend
from .client import PandasDatabase, PandasTable, ibis_schema_to_pandas
class BasePandasBackend(BaseBackend):
"""
Base class for backends based on pandas.
"""
def do_connect(self, dictionary):
"""Construct a client from a dictionary of DataFrames.
Parameters
----------
dictionary : dict
Returns
-------
Backend
"""
# register dispatchers
        from . import execution  # noqa: F401
        from . import udf  # noqa: F401
self.dictionary = dictionary
def from_dataframe(self, df, name='df', client=None):
"""
        Convenience function to construct an ibis table
        from a DataFrame.
Parameters
----------
df : DataFrame
name : str, default 'df'
client : Backend, optional
client dictionary will be mutated with the name of the DataFrame,
if not provided a new client is created
Returns
-------
Table
"""
if client is None:
return self.connect({name: df}).table(name)
client.dictionary[name] = df
return client.table(name)
def register_options(self):
ibis.config.register_option(
'enable_trace',
False,
            f'Whether to enable tracing for {self.name} execution. '
            f'See ibis.{self.name}.trace for details.',
validator=ibis.config.is_bool,
)
@property
def version(self) -> str:
return pd.__version__
@property
def current_database(self):
raise NotImplementedError('pandas backend does not support databases')
def list_databases(self, like=None):
raise NotImplementedError('pandas backend does not support databases')
def list_tables(self, like=None, database=None):
return self._filter_with_like(list(self.dictionary.keys()), like)
def table(self, name: str, schema: sch.Schema = None):
df = self.dictionary[name]
schema = sch.infer(df, schema=schema)
return self.table_class(name, schema, self).to_expr()
def database(self, name=None):
return self.database_class(name, self)
def load_data(self, table_name, obj, **kwargs):
# kwargs is a catch all for any options required by other backends.
self.dictionary[table_name] = obj
def get_schema(self, table_name, database=None):
return sch.infer(self.dictionary[table_name])
def compile(self, expr, *args, **kwargs):
return expr
class Backend(BasePandasBackend):
name = 'pandas'
database_class = PandasDatabase
table_class = PandasTable
def execute(self, query, params=None, limit='default', **kwargs):
from .core import execute_and_reset
if limit != 'default':
raise ValueError(
'limit parameter to execute is not yet implemented in the '
'pandas backend'
)
if not isinstance(query, ir.Expr):
raise TypeError(
"`query` has type {!r}, expected ibis.expr.types.Expr".format(
type(query).__name__
)
)
return execute_and_reset(query, params=params, **kwargs)
def create_table(self, table_name, obj=None, schema=None):
"""Create a table."""
if obj is None and schema is None:
raise com.IbisError('Must pass expr or schema')
if obj is not None:
df = pd.DataFrame(obj)
else:
dtypes = ibis_schema_to_pandas(schema)
df = schema.apply_to(
pd.DataFrame(columns=list(map(toolz.first, dtypes)))
)
self.dictionary[table_name] = df
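# --- Illustrative usage (a minimal sketch; it assumes Backend() can be
# constructed with no arguments, which depends on the BaseBackend version) ---
if __name__ == "__main__":
    df = pd.DataFrame({"a": [1, 2, 3]})
    con = Backend()
    t = con.from_dataframe(df, name="t")  # creates a client and returns a table expr
    print(con.execute(t))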
|
148312
|
import json
import aiohttp
from aiohttp import TCPConnector
from aiohttp.errors import ClientConnectionError
from plumeria.util.network import NameResolver
class SelectiveConnector(TCPConnector):
def __init__(self, *args, port_validator=None, **kwargs):
super().__init__(*args, **kwargs)
self.port_validator = port_validator or (lambda p: p == 80 or p == 443)
def connect(self, req):
if not self.port_validator(req.port):
raise ClientConnectionError("port {} is not permitted".format(req.port))
return super().connect(req)
class DefaultClientSession(aiohttp.ClientSession):
def __init__(self, *args, headers=None, connector=None, port_validator=None, **kwargs):
if not headers: headers = {}
headers['User-Agent'] = 'Discord chat bot'
if not connector:
connector = SelectiveConnector(resolver=NameResolver(),
port_validator=port_validator)
super().__init__(*args, headers=headers, connector=connector, **kwargs)
class BadStatusCodeError(Exception):
def __init__(self, http_code, *args, **kwargs):
super().__init__(*args, **kwargs)
self.http_code = http_code
class Response:
def __init__(self, status_code, text):
self.status_code = status_code
self._text = text
def text(self):
return self._text
def json(self):
return json.loads(self._text)
async def request(*args, require_success=True, **kwargs):
if 'data' in kwargs:
if isinstance(kwargs['data'], dict) or isinstance(kwargs['data'], list):
kwargs['data'] = json.dumps(kwargs['data'])
with DefaultClientSession() as session:
async with session.request(*args, **kwargs) as resp:
if require_success and resp.status != 200:
raise BadStatusCodeError(resp.status, "HTTP code is not 200; got {}\n\nCONTENT: {}".format(resp.status, await resp.text()))
return Response(resp.status, await resp.text())
async def get(*args, **kwargs):
return await request("get", *args, **kwargs)
async def post(*args, **kwargs):
return await request("post", *args, **kwargs)
async def head(*args, **kwargs):
return await request("head", *args, **kwargs)
class BaseRestClient:
def __init__(self, session_cls=None, default_params=None):
self.session_cls = session_cls or DefaultClientSession
def preprocess(self, json):
return json
async def request(self, *args, **kwargs):
with self.session_cls() as session:
async with session.request(*args, **kwargs) as resp:
if resp.status != 200:
raise APIError("HTTP code is not 200; got {}".format(resp.status))
return self.preprocess(await resp.json())
class APIError(Exception):
pass
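# --- Illustrative usage (a minimal sketch; the URL is a placeholder and this
# module targets the old pre-async/await aiohttp API) ---
if __name__ == "__main__":
    import asyncio

    async def demo():
        resp = await get("https://example.com/")
        print(resp.status_code, resp.text()[:80])

    asyncio.get_event_loop().run_until_complete(demo())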
|
148318
|
from PIL import Image
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from examples.classification.cat_dog_net import CatDogNet
from pytorch_toolbox.transformations.image import Resize, Normalize, NumpyImage2Tensor
from pytorch_toolbox.transformations.to_float import ToFloat
from pytorch_toolbox.transformations.compose import Compose
from pytorch_toolbox.probe.activation import show_activations
if __name__ == '__main__':
backend = "cpu" # cpu|cuda
model_path = "cat_dog_model.pth.tar"
#
# Instantiate model and load checkpoint
#
model = CatDogNet()
model.load(model_path)
model.eval()
cat_path = "images/cat.jpg"
# Load test images and prepare it for input in the loaded network
cat_img_numpy = np.array(Image.open(cat_path).convert('RGB'))
imagenet_mean = [123, 116, 103]
imagenet_std = [58, 57, 57]
transformations = Compose([Resize((128, 128)),
ToFloat(),
NumpyImage2Tensor(),
Normalize(mean=imagenet_mean, std=imagenet_std)])
cat_img = Variable(transformations(cat_img_numpy).unsqueeze(0))
"""
Show activation of input image.
"""
prediction = model(cat_img)
activations = model.load_activations()
for name, feature in activations.items():
show_activations(feature[0], name)
plt.show()
|
148319
|
from setuptools import find_packages, setup
install_requires = [
"pandas",
"numpy",
"networkx",
"matplotlib",
"seaborn",
"tqdm",
]
#setup_requires = ['pytest-runner']
#tests_require = [
# 'pytest',
# 'pytest-cov',
# 'codecov'
#]
keywords = [
"bitcoin",
"lightning-network",
"simulator",
"simulation",
"research",
"cryptoeconomics"
]
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='lnsimulator',
version='0.1.0',
description="Traffic Simulator for Bitcoin's Lightning Network ",
url='https://github.com/ferencberes/LNTrafficSimulator',
author='<NAME>',
author_email='<EMAIL>',
packages = find_packages(),
install_requires=install_requires,
#setup_requires = setup_requires,
#tests_require = tests_require,
keywords = keywords,
long_description=long_description,
long_description_content_type='text/markdown',
)
|
148337
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("BSworkflow")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
"/store/express/Run2015A/StreamExpress/ALCARECO/TkAlMinBias-Express-v1/000/246/959/00000/14174DF2-490A-E511-9862-02163E0143E9.root",
)
)
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000),
)
process.MessageLogger.debugModules = ['BeamSpotAnalyzer']
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
# this GT is for the Express, to be consistent with the file above
# in general this GT should be for the ReReco
process.GlobalTag.globaltag = 'GR_E_V48'
## Track refit
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")
# remove the following lines if you run on RECO files
process.TrackRefitter.src = 'ALCARECOTkAlMinBias'
process.TrackRefitter.NavigationSchool = ''
## PV refit
process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi")
from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices
process.offlinePrimaryVerticesFromRefittedTrks = offlinePrimaryVertices.clone()
process.offlinePrimaryVerticesFromRefittedTrks.TrackLabel = cms.InputTag("TrackRefitter")
process.offlinePrimaryVerticesFromRefittedTrks.vertexCollections.maxDistanceToBeam = 1
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxNormalizedChi2 = 20
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minSiliconLayersWithHits = 5
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Significance = 5.0
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minPixelLayersWithHits = 2
## BeamSpot fit
process.load("RecoVertex.BeamSpotProducer.d0_phi_analyzer_cff")
process.d0_phi_analyzer.BeamFitter.WriteAscii = True
process.d0_phi_analyzer.BeamFitter.AsciiFileName = 'BeamFit_LumiBased_NewAlignWorkflow_alcareco.txt'
process.d0_phi_analyzer.BeamFitter.AppendRunToFileName = False
process.d0_phi_analyzer.BeamFitter.InputBeamWidth = -1
process.d0_phi_analyzer.BeamFitter.MaximumImpactParameter = 1.0
process.d0_phi_analyzer.BeamFitter.MaximumNormChi2 = 10
process.d0_phi_analyzer.BeamFitter.MinimumInputTracks = 50
process.d0_phi_analyzer.BeamFitter.MinimumPixelLayers = -1
process.d0_phi_analyzer.BeamFitter.MinimumPt = 1.0
process.d0_phi_analyzer.BeamFitter.MinimumTotalLayers = 6
process.d0_phi_analyzer.BeamFitter.OutputFileName = 'BeamFit_LumiBased_Workflow_alcareco.root'
process.d0_phi_analyzer.BeamFitter.TrackAlgorithm = cms.untracked.vstring()
process.d0_phi_analyzer.BeamFitter.TrackCollection = 'TrackRefitter'
process.d0_phi_analyzer.BeamFitter.SaveFitResults = True
process.d0_phi_analyzer.BeamFitter.SaveNtuple = False
process.d0_phi_analyzer.BeamFitter.SavePVVertices = True
process.d0_phi_analyzer.PVFitter.Apply3DFit = True
process.d0_phi_analyzer.PVFitter.minNrVerticesForFit = 10
process.d0_phi_analyzer.PVFitter.nSigmaCut = 50.0
process.d0_phi_analyzer.PVFitter.VertexCollection = 'offlinePrimaryVerticesFromRefittedTrks'
process.d0_phi_analyzer.BSAnalyzerParameters.fitEveryNLumi = 1
process.d0_phi_analyzer.BSAnalyzerParameters.resetEveryNLumi = 1
process.p = cms.Path(process.offlineBeamSpot +
process.TrackRefitter +
process.offlinePrimaryVerticesFromRefittedTrks +
process.d0_phi_analyzer)
|
148353
|
import pymongo
import sys
from pymongo import MongoClient
client = MongoClient()
db = client['scraping']  # get the database
lang = sys.argv[1]
## The database consists of 3 different collections
# 1. tweets
# 2. blog posts
# 3. forum posts
tweets = db['tweets']
blog_posts = db['blogPosts']
forum_posts = db['forumPosts']
# fetch all documents in each collection
# languageCode codes are the same as in the ../scraping folder
# Python 3 print calls; stdout handles unicode directly, so no manual encode is needed
for post in blog_posts.find({"languageCode": lang}):
    print(post['data'])
for post in forum_posts.find({"languageCode": lang}):
    print(post['data'])
for tweet in tweets.find({"languageCode": lang}):
    print(tweet['data'])
|
148374
|
JOINTS = {
"HipCenter": 0,
"RHip": 1,
"RKnee": 2,
"RFoot": 3,
"LHip": 4,
"LKnee": 5,
"LFoot": 6,
"Spine": 7,
"Thorax": 8,
"Neck/Nose": 9,
"Head": 10,
"LShoulder": 11,
"LElbow": 12,
"LWrist": 13,
"RShoulder": 14,
"RElbow": 15,
"RWrist": 16,
}
EDGES = (
(0, 1),
(1, 2),
(2, 3),
(0, 4),
(4, 5),
(5, 6),
(0, 7),
(7, 8),
(8, 9),
(9, 10),
(8, 11),
(11, 12),
(12, 13),
(8, 14),
(14, 15),
(15, 16),
)
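# --- Illustrative helper (a small sketch, not part of the original
# constants): resolve each edge to human-readable joint names ---
JOINT_NAMES = {index: name for name, index in JOINTS.items()}
EDGE_NAMES = [(JOINT_NAMES[a], JOINT_NAMES[b]) for a, b in EDGES]
# e.g. EDGE_NAMES[0] == ("HipCenter", "RHip")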
|
148381
|
from __future__ import print_function
import sys
import numpy
import pytest
import struct
from stl import mesh
_STL_FILE = '''
solid test.stl
facet normal -0.014565 0.073223 -0.002897
outer loop
vertex 0.399344 0.461940 1.044090
vertex 0.500000 0.500000 1.500000
vertex 0.576120 0.500000 1.117320
endloop
endfacet
endsolid test.stl
'''.lstrip()
def test_valid_ascii(tmpdir, speedups):
tmp_file = tmpdir.join('tmp.stl')
with tmp_file.open('w+') as fh:
fh.write(_STL_FILE)
fh.seek(0)
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_ascii_with_missing_name(tmpdir, speedups):
tmp_file = tmpdir.join('tmp.stl')
with tmp_file.open('w+') as fh:
# Split the file into lines
lines = _STL_FILE.splitlines()
# Remove everything except solid
lines[0] = lines[0].split()[0]
# Join the lines to test files that start with solid without space
fh.write('\n'.join(lines))
fh.seek(0)
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_ascii_with_blank_lines(tmpdir, speedups):
_stl_file = '''
solid test.stl
facet normal -0.014565 0.073223 -0.002897
outer loop
vertex 0.399344 0.461940 1.044090
vertex 0.500000 0.500000 1.500000
vertex 0.576120 0.500000 1.117320
endloop
endfacet
endsolid test.stl
'''.lstrip()
tmp_file = tmpdir.join('tmp.stl')
with tmp_file.open('w+') as fh:
fh.write(_stl_file)
fh.seek(0)
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_incomplete_ascii_file(tmpdir, speedups):
tmp_file = tmpdir.join('tmp.stl')
with tmp_file.open('w+') as fh:
fh.write('solid some_file.stl')
fh.seek(0)
with pytest.raises(AssertionError):
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
for offset in (-20, 82, 100):
with tmp_file.open('w+') as fh:
fh.write(_STL_FILE[:-offset])
fh.seek(0)
with pytest.raises(AssertionError):
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_corrupt_ascii_file(tmpdir, speedups):
tmp_file = tmpdir.join('tmp.stl')
with tmp_file.open('w+') as fh:
fh.write(_STL_FILE)
fh.seek(40)
print('####\n' * 100, file=fh)
fh.seek(0)
if speedups and sys.version_info.major != 2:
with pytest.raises(AssertionError):
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
with tmp_file.open('w+') as fh:
fh.write(_STL_FILE)
fh.seek(40)
print(' ' * 100, file=fh)
fh.seek(80)
fh.write(struct.pack('<i', 10).decode('utf-8'))
fh.seek(0)
with pytest.raises(AssertionError):
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_corrupt_binary_file(tmpdir, speedups):
tmp_file = tmpdir.join('tmp.stl')
with tmp_file.open('w+') as fh:
fh.write('#########\n' * 8)
fh.write('#\0\0\0')
fh.seek(0)
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
with tmp_file.open('w+') as fh:
fh.write('#########\n' * 9)
fh.seek(0)
with pytest.raises(AssertionError):
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
with tmp_file.open('w+') as fh:
fh.write('#########\n' * 8)
fh.write('#\0\0\0')
fh.seek(0)
fh.write('solid test.stl')
fh.seek(0)
mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_duplicate_polygons():
data = numpy.zeros(3, dtype=mesh.Mesh.dtype)
data['vectors'][0] = numpy.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 1.]])
data['vectors'][0] = numpy.array([[0, 0, 0],
[2, 0, 0],
[0, 2, 1.]])
data['vectors'][0] = numpy.array([[0, 0, 0],
[3, 0, 0],
[0, 3, 1.]])
assert not mesh.Mesh(data, remove_empty_areas=False).check()
|
148382
|
from federated_aggregations.channels.channel_grid import ChannelGrid
from federated_aggregations.channels.channel import Channel
from federated_aggregations.channels.channel import PlaintextChannel
from federated_aggregations.channels.channel import EasyBoxChannel
|
148400
|
import os
import re
from lxml import html
import requests
import jsbeautifier
response = requests.get('http://app.youneedabudget.com')
with open('index.html', 'w', encoding='utf-8') as file_before:
file_before.write(response.text)
parsed = html.fromstring(response.text)
for src in parsed.xpath('//script/@src'):
url_src = str(src)
file = url_src.rsplit('/',1)[-1]
if file.startswith('before.'):
before_response = requests.get(str(src))
before_script = jsbeautifier.beautify(before_response.text)
with open(os.path.join('web_app','before.js'),'w+',encoding='utf-8') as file_before:
file_before.write(before_script)
        regex1 = re.compile(r'\s*(\d):\s"appmain"')
        regex2 = None
for line in before_script.split('\n'):
if regex1.match(line):
idx = regex1.match(line).groups()[0]
                regex2 = re.compile(r'\s*%s:\s"(.*)"' % idx)
if regex2 is not None and regex2.match(line):
test = regex2.match(line).groups()[0]
if test!='appmain':
random_id = test
break
url_appmain = '/'.join(url_src.rsplit('/',1)[:-1]+['appmain.'+random_id+'.js'])
appmain_response = requests.get(url_appmain)
appmain_script = jsbeautifier.beautify(appmain_response.text)
with open(os.path.join('web_app','appmain.js'),'w+',encoding='utf-8') as file_appmain:
file_appmain.write(appmain_script)
if file.startswith('index.'):
script_response = requests.get(str(src))
index_script = jsbeautifier.beautify(script_response.text)
with open(os.path.join('web_app','index.js'),'w+',encoding='utf-8') as file_before:
file_before.write(index_script)
pass
|
148414
|
import contextlib
import itertools
import os
import pathlib
import re
import subprocess
import attr
@attr.s
class Installation:
    path = attr.ib(converter=pathlib.Path)
@property
def python(self):
return self.path.joinpath('python.exe')
@property
def scripts_dir(self):
return self.path.joinpath('Scripts')
@property
def pip(self):
return self.scripts_dir.joinpath('pip.exe')
def get_version_info(self):
output = subprocess.check_output(
[str(self.python), '--version'], encoding='ascii',
).strip()
match = re.match(r'^Python (\d+)\.(\d+)\.(\d+)$', output)
return tuple(int(x) for x in match.groups())
def find_script(self, name):
names = itertools.chain([name], [
'{}{}'.format(name, ext)
for ext in os.environ['PATHEXT'].split(';')
])
for name in names:
with contextlib.suppress(FileNotFoundError):
return self.scripts_dir.joinpath(name).resolve(strict=True)
raise FileNotFoundError(name)
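# --- Illustrative usage (a minimal sketch; the install path is an assumption
# and, like the class itself, this is Windows-specific via PATHEXT) ---
if __name__ == "__main__":
    inst = Installation(path=r"C:\Python39")
    print(inst.get_version_info())  # e.g. (3, 9, 13)
    print(inst.find_script("pip"))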
|
148429
|
import base64
import logging
import operator
import numpy as np
from api.v1alpha1.grpc_proto.grpc_algorithm.python3 import api_pb2
logger = logging.getLogger(__name__)
class Parameter:
def __init__(self, name, space_list):
self.name = name
self.space_list = space_list
self.space_list.sort()
self.length = int(len(self.space_list))
def __str__(self):
return "Parameter(name: {}, list: {})".format(
self.name, ", ".join(self.space_list)
)
def num2str(assignments, num):
assignments.sort(key=operator.attrgetter("key"))
result = ""
for i in range(num):
result += str(assignments[i].key) + ": " + str(assignments[i].value)
if i < num - 1:
result += "-"
return result
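# Worked example (illustrative, not from the original module): with
# assignments [KeyValue(key="lr", value="0.1"), KeyValue(key="bs", value="32")]
# and num=2, sorting by key yields "bs: 32-lr: 0.1".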
class BaseSamplingService(object):
def __init__(self, request):
self.space = []
self.space_size = 1
for _par in request.parameters:
new_par = Parameter(_par.name, _par.feasible_space)
self.space.append(new_par)
self.space_size *= new_par.length
self.space_size = int(self.space_size)
self.space.sort(key=operator.attrgetter("name"))
self.existing_trials = {}
self.num_pars = int(len(request.parameters))
for _trial in request.existing_results:
self.existing_trials[
num2str(_trial.parameter_assignments, self.num_pars)
] = _trial.object_value
def get_assignment(self, request):
logger.info("-" * 100 + "\n")
print("-" * 100 + "\n")
logger.info("New getSuggestions call\n")
print("New getSuggestions call\n")
if request.algorithm_name == "grid":
return self.get_assignment_grid(request)
elif request.algorithm_name == "random":
return self.get_assignment_random(request)
return []
def grid_index_search(self, index):
assignments = []
for i in range(self.num_pars):
sub_space_size = 1
for j in range(i + 1, self.num_pars):
sub_space_size *= self.space[j].length
index_ = int(
(index % (sub_space_size * self.space[i].length)) / sub_space_size
)
assignments.append(
api_pb2.KeyValue(
key=self.space[i].name, value=self.space[i].space_list[index_]
)
)
assert num2str(assignments, self.num_pars) not in self.existing_trials
self.existing_trials[num2str(assignments, self.num_pars)] = -1
return assignments
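    # Worked example (illustrative): with parameters A = [a0, a1, a2] and
    # B = [b0, b1] (space_size = 6), index 4 decodes as
    #   A: (4 % (2*3)) // 2 = 2  ->  a2
    #   B: (4 % (1*2)) // 1 = 0  ->  b0
    # so the grid enumerates the space in mixed-radix order.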
def random_index_search(self):
while True:
assignments = []
for i in range(self.num_pars):
assignments.append(
api_pb2.KeyValue(
key=self.space[i].name,
value=self.space[i].space_list[
np.random.randint(self.space[i].length)
],
)
)
if num2str(assignments, self.num_pars) not in self.existing_trials:
break
assert num2str(assignments, self.num_pars) not in self.existing_trials
self.existing_trials[num2str(assignments, self.num_pars)] = -1
return assignments
def get_assignment_grid(self, request):
assignments_set = []
next_assignment_index = int(len(request.existing_results))
for _ in range(request.required_sampling):
assignments = self.grid_index_search(next_assignment_index)
assignments_set.append(api_pb2.ParameterAssignments(key_values=assignments))
next_assignment_index += 1
for assignment in assignments:
logger.info(
"Name = {}, Value = {}, ".format(assignment.key, assignment.value)
)
print(
"Name = {}, Value = {}, ".format(assignment.key, assignment.value)
)
logger.info("\n")
return assignments_set
def get_assignment_random(self, request):
assignments_set = []
for _ in range(request.required_sampling):
assignments = self.random_index_search()
assignments_set.append(api_pb2.ParameterAssignments(key_values=assignments))
for assignment in assignments:
logger.info(
"Name = {}, Value = {}, ".format(assignment.key, assignment.value)
)
print(
"Name = {}, Value = {}, ".format(assignment.key, assignment.value)
)
logger.info("\n")
return assignments_set
@staticmethod
def encode(name):
"""Encode the name. Chocolate will check if the name contains hyphens.
Thus we need to encode it.
"""
return base64.b64encode(name.encode("utf-8")).decode("utf-8")
|
148450
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import xml.etree.ElementTree as ET
import arrow
from builtins import *
from nzbhydra.categories import getByNewznabCats
from nzbhydra.exceptions import IndexerResultParsingException
from nzbhydra.nzb_search_result import NzbSearchResult
from nzbhydra.search_module import IndexerProcessingResult
from nzbhydra.searchmodules import newznab
logger = logging.getLogger('root')
class Jackett(newznab.NewzNab):
# todo feature: read caps from server on first run and store them in the config/database
def __init__(self, settings):
super(newznab.NewzNab, self).__init__(settings)
super(Jackett, self).__init__(settings)
self.settings = settings # Already done by super.__init__ but this way PyCharm knows the correct type
self.module = "jackett"
self.category_search = True
self.supportedFilters = ["maxage"]
self.supportsNot = False
def get_details_link(self, guid):
return guid
def get_entry_by_id(self, guid, title):
self.error("Function not supported")
return None
def get_search_urls(self, search_request, search_type="search"):
f = self.build_base_url(search_type, search_request.category, offset=search_request.offset)
query = search_request.query
if query:
f = f.add({"q": query})
if search_request.maxage:
f = f.add({"maxage": search_request.maxage})
return [f.url]
def process_query_result(self, xml_response, searchRequest, maxResults=None):
self.debug("Started processing results")
countRejected = self.getRejectedCountDict()
acceptedEntries = []
entries, total, offset = self.parseXml(xml_response, maxResults)
for entry in entries:
            accepted, reason, ri = self.accept_result(entry, searchRequest, self.supportedFilters)
if accepted:
acceptedEntries.append(entry)
else:
countRejected[ri] += 1
self.debug("Rejected search result. Reason: %s" % reason)
if total == 0 or len(acceptedEntries) == 0:
self.info("Query returned no results")
return IndexerProcessingResult(entries=acceptedEntries, queries=[], total=0, total_known=True, has_more=False, rejected=countRejected)
else:
return IndexerProcessingResult(entries=acceptedEntries, queries=[], total=total, total_known=True, has_more=False, rejected=countRejected)
def parseXml(self, xmlResponse, maxResults=None):
entries = []
try:
tree = ET.fromstring(xmlResponse.encode('utf-8'))
except Exception:
self.exception("Error parsing XML: %s..." % xmlResponse[:500])
raise IndexerResultParsingException("Error parsing XML", self)
for item in tree.find("channel").findall("item"):
entry = self.parseItem(item)
entries.append(entry)
if maxResults is not None and len(entries) == maxResults:
break
return entries, len(entries), 0
def parseItem(self, item):
entry = self.create_nzb_search_result()
# These are the values that absolutely must be contained in the response
entry.title = item.find("title").text
entry.title = self.cleanUpTitle(entry.title)
entry.link = item.find("link").text
entry.details_link = item.find("comments").text
entry.indexerguid = item.find("guid").text
entry.comments = 0
size = item.find("size")
if size is not None:
entry.size = int(size.text)
entry.attributes = []
entry.has_nfo = NzbSearchResult.HAS_NFO_NO
categories = item.find("category")
if categories is not None:
categories = categories.text
entry.category = getByNewznabCats(categories)
attributes = item.findall("torznab:attr", {"torznab": "http://torznab.com/schemas/2015/feed"})
attributes.extend(item.findall("newznab:attr", {"newznab": "http://www.newznab.com/DTD/2010/feeds/attributes/"}))
for i in attributes:
attribute_name = i.attrib["name"]
attribute_value = i.attrib["value"]
entry.attributes.append({"name": attribute_name, "value": attribute_value})
if attribute_name == "size":
entry.size = int(attribute_value)
if attribute_name == "grabs":
entry.grabs = int(attribute_value)
entry.pubDate = item.find("pubDate").text
pubDate = arrow.get(entry.pubDate, 'ddd, DD MMM YYYY HH:mm:ss Z')
self.getDates(entry, pubDate)
entry.downloadType = "torrent"
        # Some trackers return several results with the same ID (e.g. PTP), so append the size to keep the GUID unique
entry.indexerguid += str(entry.size)
return entry
def get_nfo(self, guid):
return False, None, "NFOs not supported by indexer"
def get_instance(indexer):
return Jackett(indexer)
|
148512
|
import os
import os.path
import sys
import random
import json
import tempfile
import shutil
from twisted.internet import reactor, defer
from twisted.trial import unittest
pd = os.path.dirname
this_dir = pd(os.path.abspath(__file__))
sys.path.append( pd(this_dir) )
sys.path.append( os.path.join(pd(this_dir), 'examples') )
sys.path.append( os.path.join(pd(pd(this_dir)), 'paxos') )
from zpax import durable
from zpax.network import test_node
from zpax.network.test_node import trace_messages, show_stacktrace
import single_value
def delay(t):
d = defer.Deferred()
reactor.callLater(t, lambda : d.callback(None) )
return d
class TestReq (object):
d = None
last_val = None
def __init__(self, channel='test_channel.clients', node_id='testcli'):
self.net = test_node.NetworkNode(node_id)
self.channel = channel
self.net.add_message_handler(channel, self)
self.net.connect([])
def close(self):
if self.d is not None and not self.d.called:
self.d.cancel()
def propose(self, to_id, instance, value, req_id='req_id'):
self.d = defer.Deferred()
self.net.unicast_message(to_id, self.channel, 'propose_value', dict(instance=instance,
proposed_value=value,
request_id=req_id))
return self.d
def query(self, to_id):
self.d = defer.Deferred()
self.net.unicast_message(to_id, self.channel, 'query_value', dict())
return self.d
def receive_proposal_result(self, from_uid, msg):
#print 'Propose Reply Received:', msg
self.d.callback(msg)
def receive_query_result(self, from_uid, msg):
#print 'Query Result Received:', msg
self.d.callback(msg)
class SingleValueTester(unittest.TestCase):
durable_key = 'durable_id_{0}'
def setUp(self):
self.nodes = dict()
self.leader = None
self.dleader = defer.Deferred()
self.dlost = None
self.clients = list()
self.all_nodes = 'a b c'.split()
self.dd_store = durable.MemoryOnlyStateStore()
test_node.setup()
def tearDown(self):
for c in self.clients:
c.close()
for n in self.all_nodes:
self.stop(n)
# In ZeroMQ 2.1.11 there is a race condition for socket deletion
# and recreation that can render sockets unusable. We insert
        # a short delay here to prevent the condition from occurring.
#return delay(0.05)
def new_client(self):
zreq = TestReq()
self.clients.append(zreq)
return zreq
def start(self, node_names):
def gen_cb(x, func):
def cb():
func(x)
return cb
zpax_nodes = dict()
for node_name in node_names.split():
if not node_name in self.all_nodes or node_name in self.nodes:
continue
n = single_value.SingleValueNode(test_node.Channel('test_channel', test_node.NetworkNode(node_name)),
2,
self.durable_key.format(node_name),
self.dd_store)
n.on_leadership_acquired = gen_cb(node_name, self._on_leader_acq)
n.on_leadership_lost = gen_cb(node_name, self._on_leader_lost)
n.hb_period = 0.05
n.liveness_window = 0.15
n.name = node_name
self.nodes[node_name] = n
n.net.connect([])
n.initialize()
def stop(self, node_names):
for node_name in node_names.split():
if node_name in self.nodes:
self.nodes[node_name].shutdown()
del self.nodes[node_name]
def _on_leader_acq(self, node_id):
prev = self.leader
self.leader = node_id
if self.dleader:
d, self.dleader = self.dleader, None
reactor.callLater(0.01, lambda : d.callback( (prev, self.leader) ))
def _on_leader_lost(self, node_id):
if self.dlost:
d, self.dlost = self.dlost, None
d.callback(node_id)
@defer.inlineCallbacks
def wait_for_value_equals(self, client, to_id, value):
qval = None
while qval != value:
yield delay(0.05)
r = yield client.query(to_id)
if r['value'] is not None:
qval = r['value'][1]
@defer.inlineCallbacks
def set_value(self, client, to_id, instance, value):
yield client.propose(to_id, instance, value)
yield self.wait_for_value_equals( client, to_id, value )
#@trace_messages
@defer.inlineCallbacks
def test_initial_leader(self):
self.start('a b')
yield self.dleader
#trace_messages
@defer.inlineCallbacks
def test_set_initial_value(self):
self.start('a b')
d = defer.Deferred()
c = self.new_client()
yield self.dleader
msg = yield c.query('a')
self.assertEquals(msg, dict(current_instance=1, value=None))
yield c.propose('a', 1, 'foo')
yield self.wait_for_value_equals( c, 'a', 'foo' )
@defer.inlineCallbacks
def test_set_multiple_values(self):
self.start('a b')
d = defer.Deferred()
c = self.new_client()
yield self.dleader
msg = yield c.query('a')
self.assertEquals(msg, dict(current_instance=1, value=None))
yield c.propose('a', 1, 'foo')
yield self.wait_for_value_equals( c, 'a', 'foo' )
yield c.propose('a', 2, 'bar')
yield self.wait_for_value_equals( c, 'a', 'bar' )
yield c.propose('a', 3, 'baz')
yield self.wait_for_value_equals( c, 'a', 'baz' )
@show_stacktrace
#@trace_messages
@defer.inlineCallbacks
def test_shutdown_and_restart(self):
self.start('a b')
d = defer.Deferred()
c = self.new_client()
yield self.dleader
yield c.propose('a', 1, 'foo')
yield self.wait_for_value_equals( c, 'a', 'foo' )
self.stop('a b')
yield delay(0.05)
self.dleader = defer.Deferred()
self.start('a b')
yield self.dleader
msg = yield c.query('a')
self.assertEquals(msg['value'], ('req_id', 'foo'))
#trace_messages
@defer.inlineCallbacks
def test_shutdown_and_restart_with_outstanding_proposal(self):
self.start('a b')
d = defer.Deferred()
c = self.new_client()
yield self.dleader
yield c.propose('a', 1, 'foo')
yield self.wait_for_value_equals( c, 'a', 'foo' )
self.stop('b')
yield c.propose('a', 2, 'bar')
self.assertTrue( self.nodes['a'].pax.proposed_value is not None )
self.stop('a')
yield delay(0.05)
self.dleader = defer.Deferred()
self.start('a b')
yield self.dleader
yield self.wait_for_value_equals( c, 'a', 'bar' )
@defer.inlineCallbacks
def test_node_recovery(self):
self.start('a b c')
d = defer.Deferred()
c = self.new_client()
yield self.dleader
yield self.set_value(c, 'a', 1, 'foo')
yield self.wait_for_value_equals( c, 'c', 'foo' )
self.stop('c')
yield self.set_value(c, 'a', 2, 'bar')
yield self.set_value(c, 'a', 3, 'baz')
self.start('c')
yield self.wait_for_value_equals( c, 'c', 'baz' )
|
148513
|
from jinja2 import Undefined
from lektor.constants import PRIMARY_ALT
class BadValue(Undefined):
__slots__ = ()
def get_undefined_info(undefined):
if isinstance(undefined, Undefined):
try:
undefined._fail_with_undefined_error()
except Exception as e:
return str(e)
return "defined value"
class RawValue:
__slots__ = ("name", "value", "field", "pad")
def __init__(self, name, value=None, field=None, pad=None):
self.name = name
self.value = value
self.field = field
self.pad = pad
def _get_hint(self, prefix, reason):
if self.field is not None:
return "%s in field '%s': %s" % (prefix, self.field.name, reason)
return "%s: %s" % (prefix, reason)
def bad_value(self, reason):
return BadValue(hint=self._get_hint("Bad value", reason), obj=self.value)
def missing_value(self, reason):
return Undefined(hint=self._get_hint("Missing value", reason), obj=self.value)
class _NameDescriptor:
def __get__(self, obj, type):
rv = type.__name__
if rv.endswith("Type"):
rv = rv[:-4]
return rv.lower()
class Type:
widget = "multiline-text"
def __init__(self, env, options):
self.env = env
self.options = options
@property
def size(self):
size = self.options.get("size") or "normal"
if size not in ("normal", "small", "large"):
size = "normal"
return size
@property
def width(self):
return self.options.get("width") or "1/1"
name = _NameDescriptor()
def to_json(self, pad, record=None, alt=PRIMARY_ALT):
return {
"name": self.name,
"widget": self.widget,
"size": self.size,
"width": self.width,
}
def value_from_raw(self, raw):
# pylint: disable=no-self-use
return raw
def value_from_raw_with_default(self, raw):
value = self.value_from_raw(raw)
if (
isinstance(value, Undefined)
and raw.field is not None
and raw.field.default is not None
):
return self.value_from_raw(
RawValue(raw.name, raw.field.default, field=raw.field, pad=raw.pad)
)
return value
def __repr__(self):
return "%s()" % self.__class__.__name__
|
148528
|
import pytest
import warnings
warnings.filterwarnings('ignore')
@pytest.mark.basic
def test_resize_ratio():
"""
Testing the resize_ratio function
Returns: Nothing
"""
import numpy as np
from deep_utils import resize_ratio
dummy_images = [np.random.randint(0, 255, (1200, 900, 3), dtype=np.uint8),
np.random.randint(0, 255, (800, 1200, 3), dtype=np.uint8),
np.random.randint(0, 255, (550, 350, 3), dtype=np.uint8),
np.random.randint(0, 255, (900, 900, 3), dtype=np.uint8),
]
preferred_outputs = [
(900, 675, 3),
(600, 900, 3),
(900, 572, 3),
(900, 900, 3),
]
for dummy_img, preferred_output in zip(dummy_images, preferred_outputs):
out = resize_ratio(dummy_img, 900)
assert out.shape == preferred_output, f"resize_ratio failed for input img.shape={dummy_img.shape}"
|
148536
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@npm//@bazel/protractor:package.bzl", "npm_bazel_protractor_dependencies")
load("@npm//@bazel/karma:package.bzl", "npm_bazel_karma_dependencies")
load("@io_bazel_rules_webtesting//web:repositories.bzl", "web_test_repositories")
load("@io_bazel_rules_webtesting//web/versioned:browsers-0.3.2.bzl", "browser_repositories")
load("@io_bazel_rules_sass//sass:sass_repositories.bzl", "sass_repositories")
def load_angular():
npm_bazel_protractor_dependencies()
npm_bazel_karma_dependencies()
web_test_repositories()
browser_repositories(
chromium = True,
firefox = True,
)
sass_repositories()
|
148538
|
import os
import logging
import posixpath
import stat
from django.utils.http import urlquote
from seaserv import seafile_api
from seahub.base.models import UserStarredFiles
from seahub.base.templatetags.seahub_tags import email2nickname, email2contact_email
from seahub.settings import ENABLE_VIDEO_THUMBNAIL, THUMBNAIL_ROOT
from seahub.thumbnail.utils import get_thumbnail_src
from seahub.utils import is_pro_version, FILEEXT_TYPE_MAP, IMAGE, XMIND, VIDEO
from seahub.utils.file_tags import get_files_tags_in_dir
from seahub.utils.repo import is_group_repo_staff, is_repo_owner
from seahub.utils.timeutils import timestamp_to_isoformat_timestr
logger = logging.getLogger(__name__)
json_content_type = 'application/json; charset=utf-8'
HTTP_520_OPERATION_FAILED = 520
def permission_check_admin_owner(request, username, repo_id): # maybe add more complex logic in the future
"""
if repo is owned by user return true
or check whether repo is owned by group and whether user is group's staff
so finally the code is:
check user == repo's owner
else
check user is the such group's staff
"""
if is_repo_owner(request, repo_id, username):
return True
else:
return is_group_repo_staff(request, repo_id, username)
def get_dir_file_recursively(repo_id, path, all_dirs):
is_pro = is_pro_version()
path_id = seafile_api.get_dir_id_by_path(repo_id, path)
dirs = seafile_api.list_dir_by_path(repo_id, path, -1, -1)
for dirent in dirs:
entry = {}
if stat.S_ISDIR(dirent.mode):
entry["type"] = 'dir'
else:
entry["type"] = 'file'
entry['modifier_email'] = dirent.modifier
entry["size"] = dirent.size
if is_pro:
entry["is_locked"] = dirent.is_locked
entry["lock_owner"] = dirent.lock_owner
if dirent.lock_owner:
entry["lock_owner_name"] = email2nickname(dirent.lock_owner)
entry["lock_time"] = dirent.lock_time
entry["parent_dir"] = path
entry["id"] = dirent.obj_id
entry["name"] = dirent.obj_name
entry["mtime"] = timestamp_to_isoformat_timestr(dirent.mtime)
        all_dirs.append(entry)
        # Recurse into sub-directories inside the loop so that every directory
        # entry is expanded, not just the last dirent of this level.
        if stat.S_ISDIR(dirent.mode):
            sub_path = posixpath.join(path, dirent.obj_name)
            get_dir_file_recursively(repo_id, sub_path, all_dirs)
# Use dict to reduce memcache fetch cost in large for-loop.
file_list = [item for item in all_dirs if item['type'] == 'file']
contact_email_dict = {}
nickname_dict = {}
modifiers_set = {x['modifier_email'] for x in file_list}
for e in modifiers_set:
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
for e in file_list:
e['modifier_contact_email'] = contact_email_dict.get(e['modifier_email'], '')
e['modifier_name'] = nickname_dict.get(e['modifier_email'], '')
return all_dirs
def get_dir_file_info_list(username, request_type, repo_obj, parent_dir,
with_thumbnail, thumbnail_size):
repo_id = repo_obj.id
dir_info_list = []
file_info_list = []
# get dirent(folder and file) list
parent_dir_id = seafile_api.get_dir_id_by_path(repo_id, parent_dir)
dir_file_list = seafile_api.list_dir_with_perm(repo_id,
parent_dir, parent_dir_id, username, -1, -1)
try:
starred_items = UserStarredFiles.objects.filter(email=username,
repo_id=repo_id, path__startswith=parent_dir, org_id=-1)
starred_item_path_list = [f.path.rstrip('/') for f in starred_items]
except Exception as e:
logger.error(e)
starred_item_path_list = []
# only get dir info list
if not request_type or request_type == 'd':
dir_list = [dirent for dirent in dir_file_list if stat.S_ISDIR(dirent.mode)]
for dirent in dir_list:
dir_info = {}
dir_info["type"] = "dir"
dir_info["id"] = dirent.obj_id
dir_info["name"] = dirent.obj_name
dir_info["mtime"] = timestamp_to_isoformat_timestr(dirent.mtime)
dir_info["permission"] = dirent.permission
dir_info["parent_dir"] = parent_dir
dir_info_list.append(dir_info)
# get star info
dir_info['starred'] = False
dir_path = posixpath.join(parent_dir, dirent.obj_name)
if dir_path.rstrip('/') in starred_item_path_list:
dir_info['starred'] = True
# only get file info list
if not request_type or request_type == 'f':
file_list = [dirent for dirent in dir_file_list if not stat.S_ISDIR(dirent.mode)]
# Use dict to reduce memcache fetch cost in large for-loop.
nickname_dict = {}
contact_email_dict = {}
modifier_set = {x.modifier for x in file_list}
lock_owner_set = {x.lock_owner for x in file_list}
for e in modifier_set | lock_owner_set:
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
try:
files_tags_in_dir = get_files_tags_in_dir(repo_id, parent_dir)
except Exception as e:
logger.error(e)
files_tags_in_dir = {}
for dirent in file_list:
file_name = dirent.obj_name
file_path = posixpath.join(parent_dir, file_name)
file_obj_id = dirent.obj_id
file_info = {}
file_info["type"] = "file"
file_info["id"] = file_obj_id
file_info["name"] = file_name
file_info["mtime"] = timestamp_to_isoformat_timestr(dirent.mtime)
file_info["permission"] = dirent.permission
file_info["parent_dir"] = parent_dir
file_info["size"] = dirent.size
modifier_email = dirent.modifier
file_info['modifier_email'] = modifier_email
file_info['modifier_name'] = nickname_dict.get(modifier_email, '')
file_info['modifier_contact_email'] = contact_email_dict.get(modifier_email, '')
# get lock info
if is_pro_version():
file_info["is_locked"] = dirent.is_locked
file_info["lock_time"] = dirent.lock_time
lock_owner_email = dirent.lock_owner or ''
file_info["lock_owner"] = lock_owner_email
file_info['lock_owner_name'] = nickname_dict.get(lock_owner_email, '')
file_info['lock_owner_contact_email'] = contact_email_dict.get(lock_owner_email, '')
if username == lock_owner_email:
file_info["locked_by_me"] = True
else:
file_info["locked_by_me"] = False
# get star info
file_info['starred'] = False
if file_path.rstrip('/') in starred_item_path_list:
file_info['starred'] = True
# get tag info
file_tags = files_tags_in_dir.get(file_name, [])
if file_tags:
file_info['file_tags'] = []
for file_tag in file_tags:
file_info['file_tags'].append(file_tag)
# get thumbnail info
if with_thumbnail and not repo_obj.encrypted:
# used for providing a way to determine
# if send a request to create thumbnail.
fileExt = os.path.splitext(file_name)[1][1:].lower()
file_type = FILEEXT_TYPE_MAP.get(fileExt)
if file_type in (IMAGE, XMIND) or \
file_type == VIDEO and ENABLE_VIDEO_THUMBNAIL:
# if thumbnail has already been created, return its src.
# Then web browser will use this src to get thumbnail instead of
# recreating it.
thumbnail_file_path = os.path.join(THUMBNAIL_ROOT,
str(thumbnail_size), file_obj_id)
if os.path.exists(thumbnail_file_path):
src = get_thumbnail_src(repo_id, thumbnail_size, file_path)
file_info['encoded_thumbnail_src'] = urlquote(src)
file_info_list.append(file_info)
dir_info_list.sort(key=lambda x: x['name'].lower())
file_info_list.sort(key=lambda x: x['name'].lower())
return dir_info_list, file_info_list
|
148546
|
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Add, Subtract, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import RMSprop
import tensorflow.keras.backend as K
c = tf.constant(['a', 'b'])
e = tf.math.erf([1.0, -0.5, 3.4, -2.1, 0.0, -6.5])
input = tf.keras.Input(shape=(28, 28, 1), name="img")
x = tf.keras.layers.Conv2DTranspose(16, 3, activation="relu")(input)
inputs = Input(24)
x = Dense(128, activation = "relu")(inputs)
# Dueling-style decomposition: a scalar state-value stream plus a per-action
# advantage stream, combined as Q = V + (A - mean(A)).
value = Dense(1)(x)
adv = Dense(24)(x)
# mean_adv = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(adv)
mean_adv = tf.reduce_mean(adv, axis=1, keepdims=True)
adv = Subtract()([adv, mean_adv])
outputs = Add()([value, adv])
model = Model(inputs, outputs)
model.compile(loss = "mse", optimizer = RMSprop(1e-3))
model.summary()
debug = 1
|
148563
|
import json
import logging
import os
from random import Random
from typing import Dict, Optional, Iterable, List
import srsly
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer, Token
from overrides import overrides
from src.data_classes.data_classes import AnnotatedSQL
from src.ext_services.jsql_parser import JSQLParser
from src.preprocessing.preprocess import clean_str, add_schema_description, SQL_TOKENS
from src.preprocessing.sql_utils import anonymize_values
from src.preprocessing.sql_utils import preprocess_for_jsql
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes,too-many-arguments
@DatasetReader.register("text2sql")
class Seq2SeqDatasetReader(DatasetReader):
def __init__(
self,
dataset_name: str,
tables_file_path: str,
train_data_files=None,
source_tokenizer: Tokenizer = None,
target_tokenizer: Tokenizer = None,
source_token_indexers: Dict[str, TokenIndexer] = None,
target_token_indexers: Dict[str, TokenIndexer] = None,
source_max_tokens: Optional[int] = None,
target_max_tokens: Optional[int] = None,
shuffle_schema: bool = False,
use_schema: bool = True,
start_symbol: str = "<s>",
end_symbol: str = "</s>",
source_add_start_token: bool = False,
source_add_end_token: bool = False,
target_add_start_token: bool = True,
target_add_end_token: bool = False,
truncate_long_sequences_in_train: bool = None,
uncased: bool = None,
add_column_types: bool = False,
keep_sql_values: bool = True,
upper_sql: bool = False,
use_description: bool = False,
replace_column_underscore: bool = True,
filter_failed_parsed: bool = True,
random_seed: Optional[int] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if train_data_files is None:
train_data_files = []
self._train_data_files = train_data_files
with open(tables_file_path) as in_fp:
self._tables_json = json.load(in_fp)
self._dataset_name = dataset_name
self._source_tokenizer = source_tokenizer or SpacyTokenizer()
self._target_tokenizer = target_tokenizer or self._source_tokenizer
self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()}
self._target_token_indexers = target_token_indexers or self._source_token_indexers
self._start_token: Optional[Token] = None
self._end_token: Optional[Token] = None
self._truncate_long_sequences_in_train = (
False if truncate_long_sequences_in_train is None else truncate_long_sequences_in_train
)
logger.info("truncate_long_sequences_in_train=%s", str(self._truncate_long_sequences_in_train))
self._truncate_long_sequences = True
self._uncased = True if uncased is None else uncased
logger.info("uncased=%s", str(self._uncased))
self._source_max_tokens = source_max_tokens
self._target_max_tokens = target_max_tokens
self._source_max_truncated = 0
self._target_max_truncated = 0
self._target_max_skipped = 0
self._invalid_cleaning_sql_count = 0
self._invalid_parsing_sql_count = 0
self._start_symbol = start_symbol
self._end_symbol = end_symbol
self._source_add_start_token = source_add_start_token
self._source_add_end_token = source_add_end_token
self._target_add_start_token = target_add_start_token
self._target_add_end_token = target_add_end_token
self._random_seed = random_seed
self._shuffle_schema = shuffle_schema
self._use_schema = use_schema
self._add_column_types = add_column_types
self._keep_sql_values = keep_sql_values
self._upper_sql = upper_sql
self._replace_column_underscore = replace_column_underscore
self._use_description = use_description
self._filter_failed_parsed = filter_failed_parsed
self._random = Random(random_seed)
self._sql_parser = JSQLParser.create()
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
# Reset truncated/skipped counts
self._source_max_truncated = 0
self._target_max_truncated = 0
self._target_max_skipped = 0
self._invalid_cleaning_sql_count = 0
self._invalid_parsing_sql_count = 0
is_train = os.path.isdir(file_path)
self._truncate_long_sequences = True
if is_train and not self._truncate_long_sequences_in_train:
self._truncate_long_sequences = False
logger.info("truncate_long_sequences=%s", str(self._truncate_long_sequences))
lines = self._read_lines_from_path(file_path, is_train)
for line in lines:
if self._dataset_name == "sede":
annotated_sql: AnnotatedSQL = AnnotatedSQL(
line["QuerySetId"],
line["Title"],
line["QueryBody"],
"stackexchange",
line["Description"],
)
elif self._dataset_name == "spider":
annotated_sql: AnnotatedSQL = AnnotatedSQL(
-1,
line["question"],
line["query"],
line["db_id"],
None,
)
else:
raise ValueError(f"Dataset name {self._dataset_name} is not supported")
annotated_sql = self._preprocess_sample(annotated_sql, need_to_parse_sql=True)
line_is_valid = self._validate_line(annotated_sql, filter_failed_parsed=self._filter_failed_parsed)
if not line_is_valid:
continue
yield self.text_to_instance(annotated_sql)
self._log_statistics()
def _read_lines_from_path(self, file_path: str, is_train: bool) -> List[Dict]:
if self._dataset_name == "sede":
if is_train:
lines = []
for train_data_path in self._train_data_files:
lines.extend(srsly.read_jsonl(os.path.join(file_path, train_data_path)))
else:
lines = srsly.read_jsonl(file_path)
elif self._dataset_name == "spider":
if is_train:
lines = []
for train_data_path in self._train_data_files:
with open(os.path.join(file_path, train_data_path)) as in_fp:
lines.extend(json.load(in_fp))
else:
with open(file_path) as in_fp:
lines = json.load(in_fp)
else:
raise ValueError(f"Dataset name {self._dataset_name} is not supported")
return lines
# pylint: disable=too-many-branches
def _preprocess_sample(self, annotated_sql: AnnotatedSQL, need_to_parse_sql: bool) -> AnnotatedSQL:
# clean title and description
cleaned_title = clean_str(annotated_sql.title)
cleaned_description = clean_str(annotated_sql.description)
if self._uncased:
if cleaned_title:
cleaned_title = cleaned_title.lower()
if cleaned_description:
cleaned_description = cleaned_description.lower()
# clean SQL query
cleaned_sql = None
cleaned_sql_with_values = None
if annotated_sql.query_body:
target_with_values = preprocess_for_jsql(annotated_sql.query_body)
if target_with_values:
target_tokens = target_with_values.strip(";").split()
if not self._keep_sql_values:
target_tokens = anonymize_values(target_tokens)
if self._uncased:
target_tokens = [token.lower() for token in target_tokens]
target_with_values = target_with_values.lower()
if self._upper_sql:
target_tokens = [token.upper() if token.lower() in SQL_TOKENS else token for token in target_tokens]
target = " ".join(target_tokens)
cleaned_sql = target
cleaned_sql_with_values = target_with_values
db_json = [db for db in self._tables_json if db["db_id"] == annotated_sql.db_id][0]
schema_description, schema_structured = add_schema_description(
self._uncased, self._add_column_types, db_json, self._shuffle_schema, self._random
)
if self._use_description and cleaned_description:
if cleaned_title:
cleaned_title += f" {self._end_symbol} {cleaned_description}"
if self._use_schema:
if cleaned_title:
cleaned_title += f" {schema_description}"
parsed_sql = None
if need_to_parse_sql:
parsed_sql = self._sql_parser.translate(cleaned_sql_with_values, clean=False)
preprocessed_annotated_sql: AnnotatedSQL = AnnotatedSQL(
annotated_sql.query_set_id,
annotated_sql.title,
annotated_sql.query_body,
annotated_sql.db_id,
description=annotated_sql.description,
cleaned_title=cleaned_title,
cleaned_description=cleaned_description,
cleaned_query_body=cleaned_sql,
cleaned_query_body_with_values=cleaned_sql_with_values,
schema=schema_structured,
parsed_sql=parsed_sql,
)
return preprocessed_annotated_sql
def _validate_line(self, annotated_sql: AnnotatedSQL, filter_failed_parsed: bool = False) -> bool:
# we don't have either title and SQL
if not annotated_sql.title or not annotated_sql.query_body:
return False
# cleaning of the SQL query left us with an empty SQL
if annotated_sql.query_body and not annotated_sql.cleaned_query_body:
self._invalid_cleaning_sql_count += 1
return False
# parsing of the SQL query left us with an empty SQL
if annotated_sql.query_body and (not annotated_sql.parsed_sql) and filter_failed_parsed:
self._invalid_parsing_sql_count += 1
return False
# check length of target
if annotated_sql.cleaned_query_body is not None:
tokenized_target = [Token("<pad>")] + self._target_tokenizer.tokenize(annotated_sql.cleaned_query_body)
if (
self._target_max_tokens
and len(tokenized_target) > self._target_max_tokens
and not self._truncate_long_sequences
):
self._target_max_skipped += 1
return False
return True
def _log_statistics(self):
if self._source_max_tokens and self._source_max_truncated:
logger.info(
"In %d instances, the source token length exceeded the max limit (%d) and were truncated.",
self._source_max_truncated,
self._source_max_tokens,
)
if self._target_max_tokens and (self._target_max_truncated or self._target_max_skipped):
logger.info(
"In %d instances, the target token length exceeded the max limit (%d) and were %s.",
self._target_max_truncated if self._truncate_long_sequences else self._target_max_skipped,
self._target_max_tokens,
"truncated" if self._truncate_long_sequences else "skipped",
)
if self._invalid_cleaning_sql_count > 0:
logger.info(
"In %d instances, the SQL query was invalid after SQL cleaning and skipped.",
self._invalid_cleaning_sql_count,
)
if self._invalid_parsing_sql_count > 0:
logger.info(
"In %d instances, the SQL query was invalid after SQL parsing and skipped.",
self._invalid_parsing_sql_count,
)
# pylint: disable=arguments-differ
@overrides
def text_to_instance(self, annotated_sql: AnnotatedSQL) -> Instance:
source = annotated_sql.cleaned_title
tokenized_source = self._source_tokenizer.tokenize(source)
if self._source_max_tokens and len(tokenized_source) > self._source_max_tokens:
self._source_max_truncated += 1
tokenized_source = tokenized_source[: self._source_max_tokens]
source_field = TextField(tokenized_source, self._source_token_indexers)
metadata = {"query_set_id": annotated_sql.query_set_id}
fields = {
"source_tokens": source_field,
}
if annotated_sql.cleaned_query_body is not None:
target = annotated_sql.cleaned_query_body
tokenized_target = [Token("<pad>")] + self._target_tokenizer.tokenize(target)
if self._target_max_tokens and len(tokenized_target) > self._target_max_tokens:
self._target_max_truncated += 1
if self._truncate_long_sequences:
tokenized_target = tokenized_target[: self._target_max_tokens]
target_field = TextField(tokenized_target, self._target_token_indexers)
fields["target_tokens"] = target_field
metadata["gold_sql"] = annotated_sql.query_body
metadata["db_id"] = annotated_sql.db_id
metadata["parsed_sql"] = annotated_sql.parsed_sql
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
|
148569
|
from collections import OrderedDict, deque
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.samplers.data_collector.base import PathCollector
from rlkit.torch.model_based.dreamer.rollout_functions import vec_rollout
class VecMdpPathCollector(PathCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
rollout_fn=vec_rollout,
save_env_in_snapshot=False,
env_params=None,
env_class=None,
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._rollout_fn = rollout_fn
self._num_steps_total = 0
self._num_paths_total = 0
self._save_env_in_snapshot = save_env_in_snapshot
self.env_params = env_params
self.env_class = env_class
def collect_new_paths(
self,
max_path_length,
num_steps,
runtime_policy=None,
):
paths = []
num_steps_collected = 0
while num_steps_collected < num_steps:
if not runtime_policy:
runtime_policy = self._policy
path = self._rollout_fn(
self._env,
runtime_policy,
max_path_length=max_path_length,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path["actions"])
num_steps_collected += path_len * self._env.n_envs
paths.append(path)
self._num_paths_total += len(paths) * self._env.n_envs
self._num_steps_total += num_steps_collected
log_paths = [{} for i in range(len(paths) * self._env.n_envs)]
ctr = 0
for i, path in enumerate(paths):
for j in range(self._env.n_envs):
for k in [
"observations",
"actions",
"terminals",
"rewards",
"next_observations",
]:
log_paths[ctr][k] = path[k][1:, j]
log_paths[ctr]["agent_infos"] = [{}] * path["rewards"][1:, j].shape[0]
k = "env_infos"
                # use a comprehension so each env_info dict is a distinct object (a `[{}] * n` list
                # would alias the same dict n times and every step would overwrite the others)
                log_paths[ctr][k] = [{} for _ in range(path["rewards"][1:, j].shape[0])]
for key, value in path[k].items():
for z in range(value[j].shape[0]):
log_paths[ctr][k][z][key] = value[j][z]
ctr += 1
self._epoch_paths.extend(log_paths) # only used for logging
return paths
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
def get_diagnostics(self):
path_lens = [len(path["actions"]) for path in self._epoch_paths]
stats = OrderedDict(
[
("num steps total", self._num_steps_total),
("num paths total", self._num_paths_total),
]
)
stats.update(
create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
)
)
return stats
def get_snapshot(self):
snapshot_dict = dict(
policy=self._policy,
)
if self._save_env_in_snapshot:
snapshot_dict["env"] = self._env
if self.env_params:
snapshot_dict["env_params"] = self.env_params
if self.env_class:
snapshot_dict["env_class"] = self.env_class
return snapshot_dict
|
148620
|
import os
import numpy as np
import json
import random
import jieba
import collections
from tqdm import tqdm
import config.args as args
from util.Logginger import init_logger
from pytorch_pretrained_bert.tokenization import BertTokenizer
logger = init_logger("QA", logging_path=args.log_path)
with open('TC/pybert/io/PMI_word.json','r',encoding='utf-8') as f:
PMI_word = json.load(f)
class InputExample(object):
"Template for a single data"
def __init__(self,
qas_id, # question id
question_text, # question text
doc_tokens, # context
orig_answer_text=None, # answer text
start_position=None, # For Yes, No & no-answer, start_position = 0
end_position=None, # For Yes, No & no-answer, start_position = 0
answer_type=None # We denote answer type as Yes: 0 No: 1 no-answer: 2 long-answer: 3
):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.answer_type = answer_type
class InputFeatures(object):
"Feature to feed into model"
def __init__(self,
unique_id, # feature id
example_index, # example index, note this is different from qas_id
doc_span_index, # split context index
tokens, # question token + context + flag character
adj,
token_to_orig_map, # token index before BertTokenize
token_is_max_context,
input_ids, # model input, the id of tokens
input_mask,
segment_ids, # For distinguishing question & context
start_position=None,
end_position=None,
answer_type=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
        self.adj = adj
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.answer_type = answer_type
def train_val_split(X, y, valid_size=0.25, random_state=2019, shuffle=True):
"""
训练集验证集分割
:param X: sentences
:param y: labels
:param random_state: 随机种子
"""
logger.info('train val split')
train, valid = [], []
    bucket = [[] for _ in range(len(args.answer_type))]
for data_x, data_y in tqdm(zip(X, y), desc='bucket'):
bucket[int(data_y)].append((data_x, data_y))
del X, y
for bt in tqdm(bucket, desc='split'):
N = len(bt)
if N == 0:
continue
test_size = int(N * valid_size)
if shuffle:
random.seed(random_state)
random.shuffle(bt)
valid.extend(bt[:test_size])
train.extend(bt[test_size:])
if shuffle:
random.seed(random_state)
random.shuffle(valid)
random.shuffle(train)
return train, valid
def read_squad_data(raw_data, save_dir, is_training=True):
logger.info("Read raw squad data...")
logger.info("train_dev_split is %s" % str(is_training))
logger.info("test data path is %s" % raw_data)
with open(raw_data, "r", encoding="utf-8") as fr:
data = json.load(fr)
data = data["data"]
samples = []
for e in data:
paragraphs = e["paragraphs"]
        # In the small training set we only observed one paragraph per paragraph list
for paragraph in paragraphs:
context = paragraph["context"]
qas = paragraph["qas"]
for qa in qas:
question = qa["question"]
answers = qa["answers"]
qid = qa["id"]
start_position = int(answers[0]["answer_start"])
end_position = int(answers[0]["answer_end"])
answer_text = answers[0]["text"]
answer_type = answers[0]["answer_type"]
assert len(answers) <= 1, "Found more than one answer for one question"
sample = {"qid": qid, "context": context,
"question": question, "answer_type": answer_type, "answer_text": answer_text,
"start_position": start_position, "end_position": end_position}
samples.append(sample)
if is_training:
y = [args.answer_type[sample["answer_type"]] for sample in samples]
train, valid = train_val_split(samples, y)
logger.info("Train set size is %d" % len(train))
logger.info("Dev set size is %d" % len(valid))
with open(os.path.join(save_dir, "train.json"), 'w') as fr:
for t in train:
print(json.dumps(t[0], ensure_ascii=False), file=fr)
with open(os.path.join(save_dir, "dev.json"), 'w') as fr:
for v in valid:
print(json.dumps(v[0], ensure_ascii=False), file=fr)
else:
with open(os.path.join(save_dir, "test.json"), 'w') as fr:
logger.info("Test set size is %d" %len(samples))
for sample in samples:
print(json.dumps(sample,ensure_ascii=False), file=fr)
def read_qa_examples(data_dir, corpus_type):
assert corpus_type in ["train", "dev", "test"], "Unknown corpus type"
examples = []
with open(os.path.join(data_dir, corpus_type +'.json'), 'r',encoding='utf-8') as fr:
for i, data in enumerate(fr):
data = json.loads(data.strip("\n"))
example = InputExample(qas_id=data["qid"],
question_text=data["question"],
doc_tokens=data["context"],
orig_answer_text=data["answer_text"],
start_position=data["start_position"],
end_position=data["end_position"],
answer_type=data["answer_type"])
examples.append(example)
return examples
def make_adj(text, tokens,max_seq_len):
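    # Build a token-level adjacency matrix: jieba full-mode segmentation links the first and
    # last word-piece of each multi-character word, PMI_word co-occurrence adds word-to-word
    # edges, and every token is connected to [CLS] and to itself.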
# print(jieba.lcut(text,cut_all=True))
# print(tokens)
# print(max_seq_len)
adj = np.zeros((max_seq_len,max_seq_len))
texts = jieba.lcut(text,cut_all=True)
i = 0
text_id = dict()
    while i < len(texts):
if len(texts[i]) == 1:
i += 1
else:
            l, r = 0, 0
j = 1
flag = False
while j < len(tokens)-1:
if not flag and tokens[j] in texts[i] and tokens[j+1] in texts[i]:
l = j
j += 1
flag = True
elif flag and tokens[j+1] not in texts[i]:
r = j
break
else:
j += 1
adj[l,r] = 1
adj[r,l] = 1
text_id[texts[i]] = (l,r)
i += 1
# print(text_id)
# edge of PMI
for i in range(len(texts)-1):
if texts[i] in PMI_word.keys():
for j in range(i,len(texts)):
if texts[j] in PMI_word.keys():
adj[text_id[texts[i]][1],text_id[texts[j]][0]] = 1
adj[text_id[texts[j]][0],text_id[texts[i]][1]] = 1
for i in range(len(tokens)):
adj[0,i] = 1
adj[i,0] = 1
adj[i,i] = 1
# print(adj[0,:])
return adj
def convert_examples_to_features(examples,
tokenizer,
example_type,
max_seq_length,
doc_stride,
max_query_length,
is_training):
unique_id = 10000000
features = []
all_adj = []
if os.path.isfile(example_type + '_adj.npy'):
all_adj = np.load(example_type + '_adj.npy')
adj_flag = True
else:
adj_flag = False
for example_index, example in tqdm(enumerate(examples)):
if example_index > 1000:
break
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
            # question segment ends here ([CLS] + query tokens + [SEP])
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
if adj_flag:
adj = all_adj[doc_span_index,:,:]
else:
adj = make_adj(example.doc_tokens,tokens,max_seq_length)
all_adj.append(adj)
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
answer_type = None
if is_training:
                # For training, if the answer span is not fully contained in this
                # document chunk, we mark the chunk as "no-answer".
if example.answer_type != "no-answer":
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
answer_type = "no-answer"
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
answer_type = example.answer_type
else:
start_position = 0
end_position = 0
answer_type = "no-answer"
answer_type = args.answer_type[answer_type]
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
logger.info("answer_type: %s" %answer_type)
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
adj = adj,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
answer_type=answer_type))
unique_id += 1
if not adj_flag:
np.save(example_type + '_adj.npy',all_adj)
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
if __name__ == '__main__':
#read_squad_data("data/test.json", "data/")
examples = read_qa_examples("data/", "train")
    convert_examples_to_features(examples,
                                 tokenizer=BertTokenizer("../pretrained_model/Bert-wwm-ext/bert_vocab.txt"),
                                 example_type="train",
                                 max_seq_length=512,
                                 doc_stride=500,
                                 max_query_length=32,
                                 is_training=True)
|
148624
|
import configparser
import os
import subprocess
import sys
import time
import boto3
import requests
from botocore.config import Config
def convert_dev(dev):
# Translate the device name as provided by the OS to the one used by EC2
# FIXME This approach could be broken in some OS variants, see
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#identify-nvme-ebs-device
#
# A nosec comment is appended to the following line in order to disable the B605 check.
# The only current use of this script in the repo sets the `dev` arg to the value of a device name
# obtained via the OS.
if "/nvme" in dev:
return (
"/dev/"
+ os.popen("sudo /usr/local/sbin/parallelcluster-ebsnvme-id -u -b " + dev).read().strip() # nosemgrep
)
elif "/hd" in dev:
return dev.replace("hd", "sd")
elif "/xvd" in dev:
return dev.replace("xvd", "sd")
else:
return dev
def get_all_devices():
# lsblk -d -n
# xvda 202:0 0 17G 0 disk
# xvdb 202:16 0 20G 0 disk /shared
command = ["/bin/lsblk", "-d", "-n"]
try:
# fmt: off
output = subprocess.check_output( # nosec
command, stderr=subprocess.STDOUT, universal_newlines=True
).split("\n")
# fmt: on
return ["/dev/{}".format(line.split()[0]) for line in output if len(line.split()) > 0]
except subprocess.CalledProcessError as e:
print("Failed to get devices with lsblk -d -n")
raise e
def get_imdsv2_token():
    # Try to get an IMDSv2 token; fall back to IMDSv1 if the token cannot be obtained
token = requests.put(
"http://169.254.169.254/latest/api/token",
headers={"X-aws-ec2-metadata-token-ttl-seconds": "300"}
)
headers = {}
if token.status_code == requests.codes.ok:
headers["X-aws-ec2-metadata-token"] = token.content
return headers
def main():
# Get EBS volume Id
try:
volume_id = str(sys.argv[1])
except IndexError:
print("Provide an EBS volume ID to attach i.e. vol-cc789ea5")
sys.exit(1)
# Get IMDSv2 token
token = get_imdsv2_token()
# Get instance ID
instance_id = requests.get("http://169.254.169.254/latest/meta-data/instance-id", headers=token).text
# Get region
region = requests.get("http://169.254.169.254/latest/meta-data/placement/availability-zone", headers=token).text
region = region[:-1]
# Generate a list of system paths minus the root path
paths = [convert_dev(device) for device in get_all_devices()]
# List of possible block devices
block_devices = [
"/dev/sdb",
"/dev/sdc",
"/dev/sdd",
"/dev/sde",
"/dev/sdf",
"/dev/sdg",
"/dev/sdh",
"/dev/sdi",
"/dev/sdj",
"/dev/sdk",
"/dev/sdl",
"/dev/sdm",
"/dev/sdn",
"/dev/sdo",
"/dev/sdp",
"/dev/sdq",
"/dev/sdr",
"/dev/sds",
"/dev/sdt",
"/dev/sdu",
"/dev/sdv",
"/dev/sdw",
"/dev/sdx",
"/dev/sdy",
"/dev/sdz",
]
# List of available block devices after removing currently used block devices
available_devices = [a for a in block_devices if a not in paths]
# Parse configuration file to read proxy settings
config = configparser.RawConfigParser()
config.read("/etc/boto.cfg")
proxy_config = Config()
if config.has_option("Boto", "proxy") and config.has_option("Boto", "proxy_port"):
proxy = config.get("Boto", "proxy")
proxy_port = config.get("Boto", "proxy_port")
proxy_config = Config(proxies={"https": "{0}:{1}".format(proxy, proxy_port)})
# Connect to AWS using boto
ec2 = boto3.client("ec2", region_name=region, config=proxy_config)
# Attach the volume
dev = available_devices[0]
response = ec2.attach_volume(VolumeId=volume_id, InstanceId=instance_id, Device=dev)
# Poll for volume to attach
state = response.get("State")
x = 0
while state != "attached":
if x == 60:
print("Volume %s failed to mount in 300 seconds." % volume_id)
exit(1)
if state in ["busy", "detached"]:
print("Volume %s in bad state %s" % (volume_id, state))
exit(1)
print("Volume %s in state %s ... waiting to be 'attached'" % (volume_id, state))
time.sleep(5)
x += 1
try:
state = ec2.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Attachments")[0].get("State")
except IndexError:
continue
if __name__ == "__main__":
main()
|
148628
|
from typing_extensions import Final # noqa: F401
EMAILS_FILE = 'emails.jsonl' # type: Final
USERS_FILE = 'zzusers.jsonl' # type: Final
|
148640
|
import pytest
from typing import List
from tests.globals.constants import NUMBER_OF_DOCUMENTS
from tests.globals.document import complex_nested_document, simple_nested_document
@pytest.fixture(scope="session")
def assorted_nested_documents() -> List:
return [complex_nested_document() for _ in range(NUMBER_OF_DOCUMENTS)]
@pytest.fixture(scope="session")
def simple_nested_documents() -> List:
return [simple_nested_document() for _ in range(NUMBER_OF_DOCUMENTS)]
|
148646
|
import base64
from flask import request
from functools import wraps
from flask_restful import Resource, reqparse, abort
from data.users import User, DuplicateUserError, login
class UsersApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, required=True,
help='The desired username. Should be unique '
'within the system')
parser.add_argument('password', type=str, required=True,
help='Password, please pick something secure')
parser.add_argument('fullname', type=str, required=True,
help='Your full name')
parser.add_argument('email', type=str, required=True,
help='Your email address')
args = parser.parse_args()
try:
new_user = User.create(args['username'],
args['fullname'],
args['email'],
args['password'])
return {
'username': new_user.username,
'fullname': new_user.fullname,
'email': new_user.email
}, 201
except DuplicateUserError:
abort(409, message='A user with this username already exists')
class LoginApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, required=True,
help='Your username')
parser.add_argument('password', type=str, required=True,
help='Your password')
args = parser.parse_args()
token = login(args['username'], args['password'])
if token is None:
abort(403)
return {
'token': token
}
def login_required(f):
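    """Decorator that aborts with 403 unless the request carries a valid 'Bearer <token>' Authorization header."""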
@wraps(f)
def decorated_function(*args, **kwargs):
        auth_string = request.headers.get('Authorization', '')
if not auth_string.lower().startswith('bearer '):
abort(403)
token = auth_string[7:] # the bit after 'bearer '
user = User.from_token(token)
if user is None:
abort(403)
request.user = user
return f(*args, **kwargs)
return decorated_function
|
148707
|
import numpy as np
import unittest
from monte_carlo_tree_search import Node, MCTS, ucb_score
from game import Connect2Game
class MCTSTests(unittest.TestCase):
def test_mcts_from_root_with_equal_priors(self):
class MockModel:
def predict(self, board):
# starting board is:
                # [0, 0, 0, 0]
return np.array([0.26, 0.24, 0.24, 0.26]), 0.0001
game = Connect2Game()
args = {'num_simulations': 50}
model = MockModel()
mcts = MCTS(game, model, args)
canonical_board = [0, 0, 0, 0]
print("starting")
root = mcts.run(model, canonical_board, to_play=1)
# the best move is to play at index 1 or 2
        best_outer_move = max(root.children[0].visit_count, root.children[3].visit_count)
best_center_move = max(root.children[1].visit_count, root.children[2].visit_count)
self.assertGreater(best_center_move, best_outer_move)
def test_mcts_finds_best_move_with_really_bad_priors(self):
class MockModel:
def predict(self, board):
# starting board is:
# [0, 0, 1, -1]
return np.array([0.3, 0.7, 0, 0]), 0.0001
game = Connect2Game()
args = {'num_simulations': 25}
model = MockModel()
mcts = MCTS(game, model, args)
canonical_board = [0, 0, 1, -1]
print("starting")
root = mcts.run(model, canonical_board, to_play=1)
# the best move is to play at index 1
self.assertGreater(root.children[1].visit_count, root.children[0].visit_count)
def test_mcts_finds_best_move_with_equal_priors(self):
class MockModel:
def predict(self, board):
return np.array([0.51, 0.49, 0, 0]), 0.0001
game = Connect2Game()
args = { 'num_simulations': 25 }
model = MockModel()
mcts = MCTS(game, model, args)
canonical_board = [0, 0, -1, 1]
root = mcts.run(model, canonical_board, to_play=1)
# the better move is to play at index 1
self.assertLess(root.children[0].visit_count, root.children[1].visit_count)
def test_mcts_finds_best_move_with_really_really_bad_priors(self):
class MockModel:
def predict(self, board):
# starting board is:
# [-1, 0, 0, 0]
return np.array([0, 0.3, 0.3, 0.3]), 0.0001
game = Connect2Game()
args = {'num_simulations': 100}
model = MockModel()
mcts = MCTS(game, model, args)
canonical_board = [-1, 0, 0, 0]
root = mcts.run(model, canonical_board, to_play=1)
# the best move is to play at index 1
self.assertGreater(root.children[1].visit_count, root.children[2].visit_count)
self.assertGreater(root.children[1].visit_count, root.children[3].visit_count)
class NodeTests(unittest.TestCase):
def test_initialization(self):
node = Node(0.5, to_play=1)
self.assertEqual(node.visit_count, 0)
self.assertEqual(node.prior, 0.5)
self.assertEqual(len(node.children), 0)
self.assertFalse(node.expanded())
self.assertEqual(node.value(), 0)
def test_selection(self):
node = Node(0.5, to_play=1)
c0 = Node(0.5, to_play=-1)
c1 = Node(0.5, to_play=-1)
c2 = Node(0.5, to_play=-1)
node.visit_count = 1
        c0.visit_count = 0
        c1.visit_count = 0
        c2.visit_count = 1
node.children = {
0: c0,
1: c1,
2: c2,
}
action = node.select_action(temperature=0)
self.assertEqual(action, 2)
def test_expansion(self):
node = Node(0.5, to_play=1)
state = [0, 0, 0, 0]
action_probs = [0.25, 0.15, 0.5, 0.1]
to_play = 1
node.expand(state, to_play, action_probs)
self.assertEqual(len(node.children), 4)
self.assertTrue(node.expanded())
self.assertEqual(node.to_play, to_play)
self.assertEqual(node.children[0].prior, 0.25)
self.assertEqual(node.children[1].prior, 0.15)
self.assertEqual(node.children[2].prior, 0.50)
self.assertEqual(node.children[3].prior, 0.10)
def test_ucb_score_no_children_visited(self):
node = Node(0.5, to_play=1)
node.visit_count = 1
state = [0, 0, 0, 0]
action_probs = [0.25, 0.15, 0.5, 0.1]
to_play = 1
node.expand(state, to_play, action_probs)
node.children[0].visit_count = 0
node.children[1].visit_count = 0
node.children[2].visit_count = 0
node.children[3].visit_count = 0
score_0 = ucb_score(node, node.children[0])
score_1 = ucb_score(node, node.children[1])
score_2 = ucb_score(node, node.children[2])
score_3 = ucb_score(node, node.children[3])
# With no visits, UCB score is just the priors
self.assertEqual(score_0, node.children[0].prior)
self.assertEqual(score_1, node.children[1].prior)
self.assertEqual(score_2, node.children[2].prior)
self.assertEqual(score_3, node.children[3].prior)
def test_ucb_score_one_child_visited(self):
node = Node(0.5, to_play=1)
node.visit_count = 1
state = [0, 0, 0, 0]
action_probs = [0.25, 0.15, 0.5, 0.1]
to_play = 1
node.expand(state, to_play, action_probs)
node.children[0].visit_count = 0
node.children[1].visit_count = 0
node.children[2].visit_count = 1
node.children[3].visit_count = 0
score_0 = ucb_score(node, node.children[0])
score_1 = ucb_score(node, node.children[1])
score_2 = ucb_score(node, node.children[2])
score_3 = ucb_score(node, node.children[3])
# With no visits, UCB score is just the priors
self.assertEqual(score_0, node.children[0].prior)
self.assertEqual(score_1, node.children[1].prior)
# If we visit one child once, its score is halved
self.assertEqual(score_2, node.children[2].prior / 2)
self.assertEqual(score_3, node.children[3].prior)
action, child = node.select_child()
self.assertEqual(action, 0)
def test_ucb_score_one_child_visited_twice(self):
node = Node(0.5, to_play=1)
node.visit_count = 2
state = [0, 0, 0, 0]
action_probs = [0.25, 0.15, 0.5, 0.1]
to_play = 1
node.expand(state, to_play, action_probs)
node.children[0].visit_count = 0
node.children[1].visit_count = 0
node.children[2].visit_count = 2
node.children[3].visit_count = 0
score_0 = ucb_score(node, node.children[0])
score_1 = ucb_score(node, node.children[1])
score_2 = ucb_score(node, node.children[2])
score_3 = ucb_score(node, node.children[3])
action, child = node.select_child()
# Now that we've visited the second action twice, we should
# end up trying the first action
self.assertEqual(action, 0)
    def test_ucb_score_one_child_visited_scores(self):
node = Node(0.5, to_play=1)
node.visit_count = 1
state = [0, 0, 0, 0]
action_probs = [0.25, 0.15, 0.5, 0.1]
to_play = 1
node.expand(state, to_play, action_probs)
node.children[0].visit_count = 0
node.children[1].visit_count = 0
node.children[2].visit_count = 1
node.children[3].visit_count = 0
score_0 = ucb_score(node, node.children[0])
score_1 = ucb_score(node, node.children[1])
score_2 = ucb_score(node, node.children[2])
score_3 = ucb_score(node, node.children[3])
# With no visits, UCB score is just the priors
self.assertEqual(score_0, node.children[0].prior)
self.assertEqual(score_1, node.children[1].prior)
# If we visit one child once, its score is halved
self.assertEqual(score_2, node.children[2].prior / 2)
self.assertEqual(score_3, node.children[3].prior)
if __name__ == '__main__':
unittest.main()
|
148772
|
import os
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from frontend.models import RegionalTeam
class TestImportRegionalTeams(TestCase):
    def test_import_regional_teams(self):
path = os.path.join(settings.APPS_ROOT, "pipeline", "test-data", "eauth.csv")
call_command("import_regional_teams", "--filename", path)
self.assertEqual(RegionalTeam.objects.count(), 6)
rt = RegionalTeam.objects.get(code="Y54")
self.assertEqual(rt.name, "NORTH OF ENGLAND COMMISSIONING REGION")
self.assertEqual(str(rt.open_date), "2012-10-01")
self.assertEqual(rt.close_date, None)
rt = RegionalTeam.objects.get(code="Y57")
self.assertEqual(str(rt.open_date), "2012-10-01")
self.assertEqual(str(rt.close_date), "2018-03-31")
|
148776
|
import inspect
from itertools import islice
from functools import partial
from types import BuiltinFunctionType
def _get_name(func):
"""get a function's name"""
if hasattr(func, '__name__'):
if func.__name__ == '<lambda>':
# this is pretty sketchy
return inspect.getsource(func).strip()
return func.__name__
elif isinstance(func, partial):
return _get_name(func.func)
else:
return func.__class__.__name__
def name(func, *args, **kwargs):
"""get a function's name, with called args if any"""
name = _get_name(func)
args = ', '.join([ags for ags in [
', '.join(map(str, args)),
', '.join(['{}={}'.format(k, v) for k, v in kwargs.items()])
] if ags])
if args:
return '{}({})'.format(name, args)
return name
def signature(func):
"""get a function's signature"""
if inspect.isclass(func):
return str(inspect.signature(func.__call__))
elif isinstance(func, partial):
return str(signature(func.func))
else:
try:
return str(inspect.signature(func))
except ValueError:
if isinstance(func, BuiltinFunctionType):
return '(builtin)'
else:
raise
|
148777
|
from ext.ftp.manager import FTPManager
from tests.main import MainTestClass
from zeex.core.ctrls.ftp import FtpReply, FtpManager, Downloader
import pytest
import os
class TestFTPManager(MainTestClass):
@pytest.fixture
def manager(self):
sample_connection = ['speedtest.tele2.net', 'anonymous', 'guest']
ftpm = FTPManager()
ftpm.add_connection(*sample_connection, name='speedtest')
return ftpm
def test_download(self, manager, example_file_path):
pytest.skip("Takes too darn long to test this...no point.")
to_dir = os.path.dirname(example_file_path)
files = ['100KB.zip']
with manager.connection('speedtest') as f:
for name in files:
to_path = os.path.join(to_dir, name)
if os.path.exists(to_path):
os.remove(to_path)
f.download(name, to_path)
assert os.path.exists(to_path)
os.remove(to_path)
def test_ftp_manager(self):
pytest.skip()
|
148796
|
import torch
from mmdet3d.ops import SparseBasicBlock
from mmdet3d.ops import spconv as spconv
def test_SparseUNet():
from mmdet3d.models.middle_encoders.sparse_unet import SparseUNet
self = SparseUNet(in_channels=4, sparse_shape=[41, 1600, 1408])
# test encoder layers
assert len(self.encoder_layers) == 4
assert self.encoder_layers.encoder_layer1[0][0].in_channels == 16
assert self.encoder_layers.encoder_layer1[0][0].out_channels == 16
assert isinstance(self.encoder_layers.encoder_layer1[0][0],
spconv.conv.SubMConv3d)
assert isinstance(self.encoder_layers.encoder_layer1[0][1],
torch.nn.modules.batchnorm.BatchNorm1d)
assert isinstance(self.encoder_layers.encoder_layer1[0][2],
torch.nn.modules.activation.ReLU)
assert self.encoder_layers.encoder_layer4[0][0].in_channels == 64
assert self.encoder_layers.encoder_layer4[0][0].out_channels == 64
assert isinstance(self.encoder_layers.encoder_layer4[0][0],
spconv.conv.SparseConv3d)
assert isinstance(self.encoder_layers.encoder_layer4[2][0],
spconv.conv.SubMConv3d)
# test decoder layers
assert isinstance(self.lateral_layer1, SparseBasicBlock)
assert isinstance(self.merge_layer1[0], spconv.conv.SubMConv3d)
assert isinstance(self.upsample_layer1[0], spconv.conv.SubMConv3d)
assert isinstance(self.upsample_layer2[0], spconv.conv.SparseInverseConv3d)
voxel_features = torch.tensor([[6.56126, 0.9648336, -1.7339306, 0.315],
[6.8162713, -2.480431, -1.3616394, 0.36],
[11.643568, -4.744306, -1.3580885, 0.16],
[23.482342, 6.5036807, 0.5806964, 0.35]],
dtype=torch.float32) # n, point_features
coordinates = torch.tensor(
[[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
[1, 35, 930, 469]],
dtype=torch.int32) # n, 4(batch, ind_x, ind_y, ind_z)
unet_ret_dict = self.forward(voxel_features, coordinates, 2)
seg_features = unet_ret_dict['seg_features']
spatial_features = unet_ret_dict['spatial_features']
assert seg_features.shape == torch.Size([4, 16])
assert spatial_features.shape == torch.Size([2, 256, 200, 176])
def test_SparseBasicBlock():
voxel_features = torch.tensor([[6.56126, 0.9648336, -1.7339306, 0.315],
[6.8162713, -2.480431, -1.3616394, 0.36],
[11.643568, -4.744306, -1.3580885, 0.16],
[23.482342, 6.5036807, 0.5806964, 0.35]],
dtype=torch.float32) # n, point_features
coordinates = torch.tensor(
[[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
[1, 35, 930, 469]],
dtype=torch.int32) # n, 4(batch, ind_x, ind_y, ind_z)
# test
input_sp_tensor = spconv.SparseConvTensor(voxel_features, coordinates,
[41, 1600, 1408], 2)
self = SparseBasicBlock(
4,
4,
conv_cfg=dict(type='SubMConv3d', indice_key='subm1'),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
# test conv and bn layer
assert isinstance(self.conv1, spconv.conv.SubMConv3d)
assert self.conv1.in_channels == 4
assert self.conv1.out_channels == 4
assert isinstance(self.conv2, spconv.conv.SubMConv3d)
    assert self.conv2.in_channels == 4
assert self.conv2.out_channels == 4
assert self.bn1.eps == 1e-3
assert self.bn1.momentum == 0.01
out_features = self(input_sp_tensor)
assert out_features.features.shape == torch.Size([4, 4])
def test_make_sparse_convmodule():
from mmdet3d.ops import make_sparse_convmodule
voxel_features = torch.tensor([[6.56126, 0.9648336, -1.7339306, 0.315],
[6.8162713, -2.480431, -1.3616394, 0.36],
[11.643568, -4.744306, -1.3580885, 0.16],
[23.482342, 6.5036807, 0.5806964, 0.35]],
dtype=torch.float32) # n, point_features
coordinates = torch.tensor(
[[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
[1, 35, 930, 469]],
dtype=torch.int32) # n, 4(batch, ind_x, ind_y, ind_z)
# test
input_sp_tensor = spconv.SparseConvTensor(voxel_features, coordinates,
[41, 1600, 1408], 2)
sparse_block0 = make_sparse_convmodule(
4,
16,
3,
'test0',
stride=1,
padding=0,
conv_type='SubMConv3d',
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
order=('conv', 'norm', 'act'))
assert isinstance(sparse_block0[0], spconv.SubMConv3d)
assert sparse_block0[0].in_channels == 4
assert sparse_block0[0].out_channels == 16
assert isinstance(sparse_block0[1], torch.nn.BatchNorm1d)
assert sparse_block0[1].eps == 0.001
assert sparse_block0[1].momentum == 0.01
assert isinstance(sparse_block0[2], torch.nn.ReLU)
# test forward
out_features = sparse_block0(input_sp_tensor)
assert out_features.features.shape == torch.Size([4, 16])
sparse_block1 = make_sparse_convmodule(
4,
16,
3,
'test1',
stride=1,
padding=0,
conv_type='SparseInverseConv3d',
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
order=('norm', 'act', 'conv'))
assert isinstance(sparse_block1[0], torch.nn.BatchNorm1d)
assert isinstance(sparse_block1[1], torch.nn.ReLU)
assert isinstance(sparse_block1[2], spconv.SparseInverseConv3d)
|
148824
|
import io
import socket
import ssl
import sys
if sys.version_info.major == 3:
text_stream_types = io.TextIOBase
bytes_stream_types = io.BufferedIOBase
else:
text_stream_types = io.TextIOBase
bytes_stream_types = io.BufferedIOBase, file # noqa: F821
SYSLOG_PORT = 514
# RFC6587 framing
FRAMING_OCTET_COUNTING = 1
FRAMING_NON_TRANSPARENT = 2
class TCPSocketTransport:
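    """Sends syslog messages over TCP, using RFC 6587 octet-counting or non-transparent framing."""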
def __init__(self, address, timeout, framing):
self.socket = None
self.address = address
self.timeout = timeout
self.framing = framing
self.open()
def open(self):
error = None
host, port = self.address
addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
if not addrinfo:
raise OSError("getaddrinfo returns an empty list")
for entry in addrinfo:
family, socktype, _, _, sockaddr = entry
try:
self.socket = socket.socket(family, socktype)
self.socket.settimeout(self.timeout)
self.socket.connect(sockaddr)
# Connected successfully. Erase any previous errors.
error = None
break
except OSError as e:
error = e
if self.socket is not None:
self.socket.close()
if error is not None:
raise error
def transmit(self, syslog_msg):
# RFC6587 framing
if self.framing == FRAMING_NON_TRANSPARENT:
syslog_msg = syslog_msg.replace(b"\n", b"\\n")
syslog_msg = b"".join((syslog_msg, b"\n"))
else:
syslog_msg = b" ".join((str(len(syslog_msg)).encode("ascii"), syslog_msg))
try:
self.socket.sendall(syslog_msg)
except (OSError, IOError):
self.close()
self.open()
self.socket.sendall(syslog_msg)
def close(self):
self.socket.close()
class TLSSocketTransport(TCPSocketTransport):
def __init__(
self,
address,
timeout,
framing,
tls_ca_bundle,
tls_verify,
tls_client_cert,
tls_client_key,
tls_key_password,
):
self.tls_ca_bundle = tls_ca_bundle
self.tls_verify = tls_verify
self.tls_client_cert = tls_client_cert
self.tls_client_key = tls_client_key
self.tls_key_password = tls_key_password
super(TLSSocketTransport, self).__init__(address, timeout, framing=framing)
def open(self):
super(TLSSocketTransport, self).open()
context = ssl.create_default_context(
purpose=ssl.Purpose.SERVER_AUTH, cafile=self.tls_ca_bundle
)
context.verify_mode = ssl.CERT_REQUIRED if self.tls_verify else ssl.CERT_NONE
server_hostname, _ = self.address
if self.tls_client_cert:
context.load_cert_chain(
self.tls_client_cert, self.tls_client_key, self.tls_key_password
)
self.socket = context.wrap_socket(self.socket, server_hostname=server_hostname)
class UDPSocketTransport:
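    """Sends syslog messages as UDP datagrams, reopening the socket and retrying once if a send fails."""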
def __init__(self, address, timeout):
self.socket = None
self.address = address
self.timeout = timeout
self.open()
def open(self):
error = None
host, port = self.address
addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_DGRAM)
if not addrinfo:
raise OSError("getaddrinfo returns an empty list")
for entry in addrinfo:
family, socktype, _, _, sockaddr = entry
try:
self.socket = socket.socket(family, socktype)
self.socket.settimeout(self.timeout)
self.address = sockaddr
break
except OSError as e:
error = e
if self.socket is not None:
self.socket.close()
if error is not None:
raise error
def transmit(self, syslog_msg):
try:
self.socket.sendto(syslog_msg, self.address)
except (OSError, IOError):
self.close()
self.open()
self.socket.sendto(syslog_msg, self.address)
def close(self):
self.socket.close()
class UnixSocketTransport:
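    """Sends syslog messages to a Unix domain socket, trying SOCK_DGRAM first and falling back to SOCK_STREAM."""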
def __init__(self, address, socket_type):
self.socket = None
self.address = address
self.socket_type = socket_type
self.open()
def open(self):
if self.socket_type is None:
socket_types = [socket.SOCK_DGRAM, socket.SOCK_STREAM]
else:
socket_types = [self.socket_type]
for type_ in socket_types:
# Syslog server may be unavailable during handler initialisation.
# So we ignore connection errors
try:
self.socket = socket.socket(socket.AF_UNIX, type_)
self.socket.connect(self.address)
self.socket_type = type_
break
except OSError:
if self.socket is not None:
self.socket.close()
def transmit(self, syslog_msg):
try:
self.socket.send(syslog_msg)
except (OSError, IOError):
self.close()
self.open()
self.socket.send(syslog_msg)
def close(self):
self.socket.close()
class StreamTransport:
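    """Writes syslog messages to a writable text or binary stream, appending a trailing newline."""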
def __init__(self, stream):
if isinstance(stream, text_stream_types):
self.text_mode = True
elif isinstance(stream, bytes_stream_types):
self.text_mode = False
else:
raise ValueError("Stream is not of a valid stream type")
if not stream.writable():
raise ValueError("Stream is not a writeable stream")
self.stream = stream
def transmit(self, syslog_msg):
syslog_msg = syslog_msg + b"\n"
if self.text_mode:
syslog_msg = syslog_msg.decode(self.stream.encoding, "replace")
self.stream.write(syslog_msg)
def close(self):
# Closing the stream is left up to the user.
pass
|
148879
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
import os
from sheetsite.site_queue import app
import smtplib
@app.task
def notify_one(email, subject, page, text):
print("send [%s] / %s / %s" % (email, subject, page))
server_ssl = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server_ssl.ehlo() # optional, called by login()
me = os.environ['GMAIL_USERNAME']
server_ssl.login(me, os.environ['GMAIL_PASSWORD'])
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = me
msg['To'] = email
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(page, 'html')
msg.attach(part1)
msg.attach(part2)
server_ssl.sendmail(me, email, msg.as_string())
server_ssl.close()
return True
@app.task
def notify_all(name, site_params, diff_html, diff_text):
print("NOTIFY_spreadsheet", site_params, name)
import daff
import jinja2
import premailer
root = os.environ['SHEETSITE_CACHE']
path = os.path.join(root, name)
print("Should look in", path)
notifications = None
for fname in ['private.json', 'public.json']:
full_fname = os.path.join(path, fname)
print("Look in", full_fname)
book = json.loads(open(full_fname).read())
if 'notifications' in book['tables']:
notifications = book['tables']['notifications']
break
if notifications is None:
print("No notifications requested")
return True
print("Notifications", notifications)
# make a html report
css = daff.DiffRender().sampleCss()
site_params = dict(site_params)
site_params['css'] = css
site_params['diff'] = diff_html
env = jinja2.Environment(loader=jinja2.PackageLoader('sheetsite', 'templates'))
template = env.get_template('update.html')
page = template.render(site_params)
page = premailer.transform(page)
site_params['diff'] = diff_text
template = env.get_template('update.txt')
page_text = template.render(site_params)
for target in notifications['rows']:
email = target.get('EMAIL', None)
if email is None:
email = target.get('email', None)
if email is not None:
if site_params['no_notify']:
print("skip email to {}".format(email))
else:
notify_one.delay(email=email,
subject="update to {}".format(site_params.get('name',
'directory')),
page=page,
text=page_text)
return True
|
148904
|
from __future__ import absolute_import
import json
from six import string_types
from jet_bridge_base.fields.field import Field
class JSONField(Field):
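    """Field that decodes JSON strings on input and returns other values (and representations) unchanged."""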
field_error_messages = {
'invalid': 'not a valid JSON'
}
def to_internal_value_item(self, value):
if isinstance(value, string_types):
try:
return json.loads(value)
except ValueError:
self.error('invalid')
else:
return value
def to_representation_item(self, value):
return value
|
148918
|
import xlrd
import functools
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from .base import (BasePriceList, hourly_rates_only_validator,
min_price_validator)
from .spreadsheet_utils import generate_column_index_map, safe_cell_str_value
from .coercers import (strip_non_numeric, extract_min_education,
extract_hour_unit_of_issue)
from contracts.models import EDUCATION_CHOICES
DEFAULT_SHEET_NAME = 'Labor Category'
EXAMPLE_SHEET_ROWS = [
[
r'SIN(s) PROPOSED',
r'SERVICE PROPOSED (e.g. Job Title/Task)',
r'MINIMUM EDUCATION/ CERTIFICATION LEVEL',
r'MINIMUM YEARS OF EXPERIENCE',
r'COMMERCIAL LIST PRICE (CPL) OR MARKET PRICES',
r'UNIT OF ISSUE (e.g. Hour, Task, Sq ft)',
r'MOST FAVORED CUSTOMER (MFC)',
r'DISCOUNT OFFERED TO MFC (%)',
r'MFC PRICE',
r'GSA(%) DISCOUNT (exclusive of the .75% IFF)',
r'PRICE OFFERED TO GSA (excluding IFF)',
r'PRICE OFFERED TO GSA (including IFF)',
r'QUANTITY/VOLUME DISCOUNT',
],
[
r'712-3',
r'Project Manager',
r'High School',
r'3',
r'',
r'',
r'',
r'',
r'',
r'',
r'',
r'95.00',
r'',
],
]
DEFAULT_FIELD_TITLE_MAP = {
'sin': 'SIN(s) Proposed',
'labor_category': 'Service Proposed (e.g. Job Title/Task)', # noqa
'education_level': 'Minimum Education / Certification Level',
'min_years_experience': 'Minimum Years of Experience',
'unit_of_issue': 'Unit of Issue (e.g. Hour, Task, Sq Ft)',
'price_including_iff': 'Price Offered to GSA (including IFF)',
}
def glean_labor_categories_from_file(f, sheet_name=DEFAULT_SHEET_NAME):
book = xlrd.open_workbook(file_contents=f.read())
return glean_labor_categories_from_book(book, sheet_name)
def glean_labor_categories_from_book(book, sheet_name=DEFAULT_SHEET_NAME):
if sheet_name not in book.sheet_names():
raise ValidationError(
'There is no sheet in the workbook called "%s".' % sheet_name
)
sheet = book.sheet_by_name(sheet_name)
rownum = 1 # start on first row after heading row
cats = []
heading_row = sheet.row(0)
col_idx_map = generate_column_index_map(heading_row,
DEFAULT_FIELD_TITLE_MAP)
coercion_map = {
'price_including_iff': strip_non_numeric,
'min_years_experience': int,
'education_level': extract_min_education,
'unit_of_issue': extract_hour_unit_of_issue,
}
while True:
cval = functools.partial(safe_cell_str_value, sheet, rownum)
sin = cval(col_idx_map['sin'])
price_including_iff = cval(col_idx_map['price_including_iff'],
coercer=strip_non_numeric)
is_price_ok = (price_including_iff.strip() and
float(price_including_iff) > 0)
if not sin.strip() and not is_price_ok:
break
cat = {}
for field, col_idx in col_idx_map.items():
coercer = coercion_map.get(field, None)
cat[field] = cval(col_idx, coercer=coercer)
cats.append(cat)
rownum += 1
return cats
class Region3PriceListRow(forms.Form):
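    """Validates a single labor-category row gleaned from a Region 3 price list spreadsheet."""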
sin = forms.CharField(label='SIN(s) Proposed')
labor_category = forms.CharField(
label="SERVICE PROPOSED (e.g. Job Title/Task)"
)
education_level = forms.CharField(
label="Minimum Education / Certification Level"
)
min_years_experience = forms.IntegerField(
label="Minimum Years of Experience"
)
unit_of_issue = forms.CharField(
label="Unit of issue",
required=True,
validators=[hourly_rates_only_validator]
)
price_including_iff = forms.DecimalField(
label='Price Offered to GSA (including IFF)',
validators=[min_price_validator]
)
def clean_education_level(self):
value = self.cleaned_data['education_level']
values = [choice[1] for choice in EDUCATION_CHOICES]
if value not in values:
raise ValidationError('This field must contain one of the '
'following values: %s' % (', '.join(values)))
return value
def contract_model_education_level(self):
# Note that due to the way we've cleaned education_level, this
# code is guaranteed to work.
return [
code for code, name in EDUCATION_CHOICES
if name == self.cleaned_data['education_level']
][0]
def contract_model_base_year_rate(self):
return self.cleaned_data['price_including_iff']
class Region3PriceList(BasePriceList):
title = '71_IIK'
table_template = 'data_capture/price_list/tables/region_3.html'
upload_example_template = ('data_capture/price_list/upload_examples/'
'region_3.html')
upload_widget_extra_instructions = 'XLS or XLSX format, please.'
def __init__(self, rows):
super().__init__()
self.rows = rows
for row in self.rows:
form = Region3PriceListRow(row)
if form.is_valid():
self.valid_rows.append(form)
else:
self.invalid_rows.append(form)
def add_to_price_list(self, price_list):
for row in self.valid_rows:
price_list.add_row(
labor_category=row.cleaned_data['labor_category'],
education_level=row.contract_model_education_level(),
min_years_experience=row.cleaned_data['min_years_experience'],
base_year_rate=row.contract_model_base_year_rate(),
sin=row.cleaned_data['sin']
)
def serialize(self):
return self.rows
def to_table(self):
return render_to_string(self.table_template,
{'rows': self.valid_rows})
def to_error_table(self):
return render_to_string(self.table_template,
{'rows': self.invalid_rows})
@classmethod
def get_upload_example_context(cls):
return {
'sheet_name': DEFAULT_SHEET_NAME,
'sheet_rows': EXAMPLE_SHEET_ROWS,
}
@classmethod
def deserialize(cls, rows):
return cls(rows)
@classmethod
def load_from_upload(cls, f):
try:
rows = glean_labor_categories_from_file(f)
return Region3PriceList(rows)
except ValidationError:
raise
        except Exception:
raise ValidationError(
"An error occurred when reading your Excel data."
)
|
148991
|
import logging
import os
from os.path import isfile, join
import numpy as np
from data_io import file_reading
from data_io import x_y_spliting
def data_plot(data_file, class_column=0, delimiter=' '):
    # matplotlib is only needed for plotting, so import it lazily here
    import matplotlib.pyplot as plt
    x_matrix, attr_num = file_reading(data_file, delimiter, True)
x_matrix, y_vector = x_y_spliting(x_matrix, class_column)
y_min = min(y_vector)
y_max = max(y_vector)
x_row, x_col = x_matrix.shape
    attr_len = x_col // attr_num  # integer division so reshape receives an int
x_matrix = x_matrix.reshape(x_row, attr_num, attr_len)
    for label in range(y_min, y_max + 1):  # include the last class label
out_pdf = "asl_class_" + str(label) + ".pdf"
fig = plt.figure()
label_index = np.where(y_vector==label)[0]
label_row = x_matrix[label_index[0], :, :]
for attr in range(0, attr_num):
plot_series = label_row[attr, :]
plot_len = len(plot_series)
stop_i = plot_len
for i in range(0, plot_len):
re_i = plot_len - i - 1
if plot_series[re_i] == 0:
stop_i = stop_i - 1
else:
break
plt.plot(plot_series[0:stop_i])
fig.savefig(out_pdf, dpi=fig.dpi)
def data_checking(data_file, class_column=0, delimiter=' '):
ret_str = ""
x_matrix, attr_num = file_reading(data_file, delimiter, True)
x_matrix, y_vector = x_y_spliting(x_matrix, class_column)
ret_str = 'x_matrix shape: ' + str(x_matrix.shape)
y_min = min(y_vector)
y_max = max(y_vector)
ret_str = ret_str + "\nclass labels from " + str(y_min) + " to " + str(y_max)
#for i in range(y_min, y_max+1):
# ret_str = ret_str + '\nclass '+ str(i) + ': '+str(y_vector.count(i))
unique, counts = np.unique(y_vector, return_counts=True)
ret_str = ret_str +'\n'+ str(dict(zip(unique, counts)))
return ret_str
def arc_reduce_null(fname, null_class=1, null_max=1000, class_column=0, delimiter=' ', header = True):
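    # Read the data file, keeping at most null_max rows of the null class, and return
    # (features, labels) with the class column removed.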
num = 0
data_matrix = []
null_count = 0
with open(fname) as f:
data_row = []
for line in f:
            if header:
attr_num = int(line.strip())
header = False
continue
data_row = line.split(delimiter)
if int(data_row[class_column]) == null_class:
null_count = null_count + 1
if null_count < null_max:
data_matrix.append(data_row)
else:
data_matrix.append(data_row)
row_num = len(data_matrix)
col_num = len(data_matrix[0])
data_matrix = np.array(data_matrix, dtype=float).reshape(row_num, col_num)
data_matrix.astype(float)
y_vector = data_matrix[:, class_column].astype(int)
return np.delete(data_matrix, class_column, 1), y_vector
if __name__ == '__main__':
#data_file = '../../data/gesture_data/processed_data/data.txt_trainTest10/train_0.txt'
#data_file = '../../data/arc_activity_recognition/s1_ijcal/train.txt'
#class_column = 0
#delimiter = ' '
#ret_str = data_checking(data_file, class_column, delimiter)
#print ret_str
#data_file = '../../data/arc_activity_recognition/s1_ijcal/test.txt'
#class_column = 0
#delimiter = ' '
#ret_str = data_checking(data_file, class_column, delimiter)
#print ret_str
data_file = '../../data/evn/ds/DS_all_ready_to_model.csv_trainTest2_weekly_3attr/test_0.txt'
#data_file = '../../data/human/subject10_ideal.log'
#class_column = 119
#delimiter = '\t'
##null_class=1
##null_max=1000
##x_matrix, y_vector = readFile(data_file, null_class, null_max, class_column);
##print x_matrix.shape
##print y_vector.shape
#
#data_file = '../../data/human/processed/ready/data.txt'#_trainTest10/train_0.txt'
#class_column = 0
#delimiter = ' '
#ret_str = data_checking(data_file, class_column, delimiter)
#print ret_str
data_file = '../../data/dsa/train_test_10_fold/test_0.txt'
#data_file = '../../data/dsa/output.txt'
#data_file = '../../data/rar/train_test_10_fold_class_based/train_0.txt_class_0.txt'
#data_file = "../../data/arabic/train_test_1_fold/train_0.txt"
#data_file = "../../data/arabic/train_test_1_fold/test_0.txt"
#data_file = "../../data/asl/train_test_3_fold/train_0.txt"
#data_file = '../../data/rar/train_test_10_fold/test_0.txt'
#data_file = '../../data/arc/train_test_10_fold/test_0.txt'
#data_file = '../../data/fixed_arc/train_test_1_fold/test_0.txt'
data_key = "phs"
data_key = "eeg"
#data_key = "fad"
data_file = "../../data/" + data_key +"/train.txt"
class_column = 0
delimiter = ' '
#data_plot(data_file, class_column, delimiter)
ret_str = data_checking(data_file, class_column, delimiter)
print(ret_str)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.