| id | content |
|---|---|
1647691
|
import argparse
from datetime import datetime
import time
import json
import os
from tqdm import trange, tqdm
from timeit import default_timer as timer
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from perlin import TileableNoise
from math import sin, pi
from random import random, seed, uniform, randrange
from scene_storage import *
try:
from manta import *
import gc
except ImportError:
pass
import sys
sys.path.append(sys.path[0]+"/../")
from keras_models_combined_cleansplit import *
from keras_data import read_args_file
from scipy import ndimage
from scipy.signal import argrelextrema
parser = argparse.ArgumentParser()
parser.add_argument("--load_path", type=str, required=True)
parser.add_argument('--warmup_steps', type=int, default=10)
parser.add_argument('--randomized_warmup_steps', action='store_true')
parser.add_argument('--min_warmup_steps', type=int, default=10)
parser.add_argument('--seed', type=int, default=10)
parser.add_argument('--num_frames', type=int, default=100)
parser.add_argument('--num_scenes', type=int, default=1)
parser.add_argument('--output_images', action='store_true')
parser.add_argument('--dont_delete_images', action='store_true')
parser.add_argument('--output_uni', action='store_true')
parser.add_argument('--additional_inflow', action='store_true')
parser.add_argument('--random_sink', action='store_true')
parser.add_argument('--random_obstacle', action='store_true')
parser.add_argument('--second_order_density_advection', action='store_true')
parser.add_argument('--show_gui', action='store_true')
parser.add_argument('--classic_ae', action='store_true')
parser.add_argument('--profile', action='store_true')
parser.add_argument('--upres', action='store_true')
parser.add_argument('--load_warmup_from_disk', action='store_true')
parser.add_argument('--override_vel', action='store_true')
parser.add_argument('--min_vel', type=float, default=0.0)
parser.add_argument('--max_vel', type=float, default=0.0)
parser.add_argument('--randomize_vel', action='store_true')
add_storage_args(parser)
args = parser.parse_args()
warmup_steps = args.warmup_steps
randomized_warmup_steps = args.randomized_warmup_steps
min_warmup_steps = args.min_warmup_steps
nseed = args.seed
num_frames = args.num_frames
num_scenes = args.num_scenes
output_images = args.output_images
dont_delete_images = args.dont_delete_images
output_uni = args.output_uni
prediction_type = args.prediction_type
screenshot_path_format = args.screenshot_path_format
field_path_format = args.field_path_format
show_gui = args.show_gui
classic_ae = args.classic_ae
profile = args.profile
upres = args.upres
second_order_density_advection = args.second_order_density_advection
load_warmup_from_disk = args.load_warmup_from_disk
override_vel = args.override_vel
min_vel_override = args.min_vel
max_vel_override = args.max_vel
randomize_vel = args.randomize_vel
model_name = args.load_path.rstrip(os.path.sep+"/\\")
model_name = model_name.split(os.path.sep)[-2:]
if model_name[1] == "checkpoint":
model_name = model_name[0]
else:
model_name = model_name[1]
print(model_name)
log_dir = create_folder_hierarchy("pred_smoke_karman", model_name, args.prediction_type, nseed)
dump_metadata(log_dir, args)
perf_data_path = log_dir
log_dir += "%06d/"
# Load input_args.json
with open(find_input_args_file(args.load_path)) as f:
config_json = json.load(f)
config = DictToNamespace(config_json)
# read config entries
input_frame_count = config.input_frame_count
prediction_window = config.w_num
decode_predictions = config.decode_predictions
skip_pred_steps = config.skip_pred_steps
init_state_network = config.init_state_network
in_out_states = config.in_out_states
pred_gradient_loss = config.pred_gradient_loss
ls_prediction_loss = config.ls_prediction_loss
ls_supervision = config.ls_supervision
sqrd_diff_loss = config.sqrd_diff_loss
ls_split = config.ls_split
model_base_dir = find_model_base_dir(args.load_path)
data_args_path = None
if os.path.exists( os.path.join( model_base_dir, "data_args.txt")):
data_args_path = os.path.join(model_base_dir, "data_args.txt")
dataset_meta_info = read_args_file(data_args_path)
else:
data_args_path = os.path.join(config.data_path, "args.txt")
dataset_meta_info = read_args_file(data_args_path)
sup_param_count = max(1,int(dataset_meta_info['num_param']) - 2) # two parameters are always present -> scene num and frame num
res_x = int(dataset_meta_info["resolution_x"])
res_y = int(dataset_meta_info["resolution_y"])
res_z = int(dataset_meta_info["resolution_z"])
in_out_dim = 3 if "density" in config.data_type else 2
in_out_dim = in_out_dim + 1 if config.is_3d else in_out_dim
input_shape = (input_frame_count,)
input_shape += (res_z,) if config.is_3d else ()
input_shape += (res_y, res_x, in_out_dim)
if classic_ae:
rec_pred = RecursivePredictionCleanSplit(config=config, input_shape=input_shape, decode_predictions=decode_predictions, skip_pred_steps=skip_pred_steps, init_state_network=init_state_network, in_out_states=in_out_states, pred_gradient_loss=pred_gradient_loss, ls_prediction_loss=ls_prediction_loss, ls_supervision=ls_supervision, sqrd_diff_loss=sqrd_diff_loss, ls_split=ls_split, supervised_parameters=sup_param_count)
else:
rec_pred = RecursivePrediction(config=config, input_shape=input_shape, decode_predictions=decode_predictions, skip_pred_steps=skip_pred_steps, init_state_network=init_state_network, in_out_states=in_out_states, pred_gradient_loss=pred_gradient_loss, ls_prediction_loss=ls_prediction_loss, ls_supervision=ls_supervision, sqrd_diff_loss=sqrd_diff_loss, ls_split=ls_split, supervised_parameters=sup_param_count)
rec_pred.load_model(args.load_path, data_args_path=data_args_path) # load_path argument
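# build a standalone latent-space prediction network and copy the trained weights over from the recursive model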
pred = Prediction(config=rec_pred.config, input_shape=(rec_pred.w_num, rec_pred.z_num))
pred._build_model()
pred.model.set_weights(rec_pred.pred.model.get_weights())
# Load dataset args
args = DictToNamespace(dataset_meta_info)
if os.path.exists( os.path.join( model_base_dir, "v_range.txt")):
vr = np.loadtxt(os.path.join(model_base_dir, "v_range.txt"))
else:
vr = np.loadtxt(os.path.join(config.data_path, "v_range.txt"))
normalization_factor_v = max(abs(vr[0]), abs(vr[1]))
print("Normalization Factor Velocity: {}".format(normalization_factor_v))
if os.path.exists( os.path.join( model_base_dir, "d_range.txt")):
dr = np.loadtxt(os.path.join(model_base_dir, "d_range.txt"))
else:
dr = np.loadtxt(os.path.join(config.data_path, "d_range.txt"))
normalization_factor_d = max(abs(dr[0]), abs(dr[1]))
print("Normalization Factor Density: {}".format(normalization_factor_d))
np.random.seed(seed=int(nseed))
seed(nseed)
assert sup_param_count == 1, "Supervised param count {} does not match {}!".format(sup_param_count, 1)
boundary_cond_order = int(args.boundary_cond_order)
density_adv_order = 2 if second_order_density_advection else int(args.density_adv_order)
training_warmup_steps = int(args.warmup_steps)
if training_warmup_steps > warmup_steps:
print("WARNING: training warmup steps {} were higher than given warmup_steps parameter... warmup_steps={}".format(training_warmup_steps, warmup_steps))
def main():
prediction_history = PredictionHistory(in_ts=rec_pred.w_num, data_shape=(rec_pred.z_num,))
# solver params
res_x = int(args.resolution_x)
res_y = int(args.resolution_y)
res_z = int(args.resolution_z)
gs = vec3(res_x, res_y, res_z)
res_max = max(res_x, max(res_y, res_z))
s = Solver(name='main', gridSize=gs, dim=3 if res_z > 1 else 2)
s.frameLength = float(args.time_step)
s.timestep = float(args.time_step)
# cg solver params
cgAcc = 1e-04
cgIter = 5
# frequency analysis
freq_x_coord = 0.8
freq_y_coord = 0.7
if upres:
gs_upres = vec3(res_x * 2, res_y * 2, res_z * 2 if res_z > 1 else res_z)
s_upres = Solver(name='upres', gridSize=gs_upres, dim=3 if res_z > 1 else 2)
density_upres = s_upres.create(RealGrid, name="density_upres")
vel_upres = s_upres.create(MACGrid, name="vel_upres")
flags_upres = s_upres.create(FlagGrid, name="flags_upres")
phiWalls_upres = s_upres.create(LevelsetGrid, name="phiWalls_upres")
fractions_upres = s_upres.create(MACGrid, name="fractions_upres")
phiObs_upres = s_upres.create(LevelsetGrid, name="phiObs_upres")
if output_uni:
if upres:
gs_blender = vec3(res_x*2, res_z * 2 if res_z > 1 else res_z, res_y*2)
else:
gs_blender = vec3(res_x, res_z, res_y)
s_blender = Solver(name='blender', gridSize=gs_blender, dim=3 if res_z > 1 else 2)
density_blender = s_blender.create(RealGrid, name="density_blender")
if not (gs_blender.x == gs_blender.y == gs_blender.z):
max_dim = max(max(gs_blender.x, gs_blender.y), gs_blender.z)
gs_blender_cubic = vec3(max_dim, max_dim, max_dim)
s_blender_cubic = Solver(name='blender', gridSize=gs_blender_cubic, dim=3 if res_z > 1 else 2)
density_blender_cubic = s_blender_cubic.create(RealGrid, name="density_blender_cubic")
else:
density_blender_cubic = None
# viscosity
worldScale = 1.0 # world-space size of manta's normalized unit cube
# viscosity, in [m^2/s] , rescale to unit cube
# uncomment one of these to select LDC with specific Reynolds nr
# (higher ones will need larger resolution!)
#visc = 0.0002 / (worldScale*worldScale) # Re 5k
#visc = 0.0001 / (worldScale*worldScale) # Re 10k
#visc = 0.00005 / (worldScale*worldScale) # Re 20k
#visc = 0.00001 / (worldScale*worldScale) # Re 100k
visc = 0.0000183 / (worldScale*worldScale) # Re ~55k
#visc = 0. # off, rely on numerical viscosity, no proper LDC!
flags = s.create(FlagGrid, name="flags")
vel = s.create(MACGrid, name="vel")
density = s.create(RealGrid, name="density")
pressure = s.create(RealGrid, name="pressure")
fractions = s.create(MACGrid, name="fractions")
phiWalls = s.create(LevelsetGrid, name="phiWalls")
phiObs = s.create(LevelsetGrid, name="phiObs")
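# numpy staging buffers for transferring velocity and density fields between manta grids and the network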
v_ = np.zeros([res_z,res_y,res_x,3], dtype=np.float32)
d_ = np.zeros([res_z,res_y,res_x,1], dtype=np.float32)
gui = None
if GUI and show_gui:
gui = Gui()
gui.show(True)
gui.pause()
print('start generation')
sim_id = 0
# pre-generate noise, so that all generated scenes for prediction and simulation look the same
nx_list = []
warmup_list = []
for i in range(num_scenes):
# Warmup steps
if randomized_warmup_steps:
warmup_list.append(randrange(min_warmup_steps, warmup_steps))
else:
warmup_list.append(warmup_steps)
# noise
nx_list_entry = []
if override_vel:
print("Training min/max vel: {}, {} <-> Override min/max vel: {}, {}".format(float(args.min_vel), float(args.max_vel), min_vel_override, max_vel_override))
min_vel = min_vel_override
max_vel = max_vel_override
else:
min_vel = float(args.min_vel)
max_vel = float(args.max_vel)
if randomize_vel:
rand_vel = uniform(min_vel, max_vel)
else:
cur_a = i / (num_scenes-1)
rand_vel = min_vel * (1-cur_a) + max_vel * cur_a
t_end = num_frames + warmup_list[i] if randomized_warmup_steps else num_frames
for t in range(t_end):
nx_list_entry.append(rand_vel)
nx_list.append(nx_list_entry)
# Store warmup steps
warmup_file = os.path.join(perf_data_path, 'warmup_steps.txt')
print(warmup_file)
with open(warmup_file, 'w') as f:
print("Warmup List")
print(warmup_list)
for warmup_entry in range(len(warmup_list) - 1):
f.write('%d\n' % warmup_list[warmup_entry])
f.write('%d' % warmup_list[-1])
# load vars from simulation execution
if load_warmup_from_disk:
simulation_path = get_path_to_sim("pred_smoke_karman", model_name, "simulation", nseed)
assert os.path.exists(simulation_path), "Simulation path does not exist for given seed! Abort..."
shelve_vars = shelve_file_to_var(simulation_path)
for key in shelve_vars:
locals()[key] = shelve_vars[key]
# Store variables to disk before simulation starts
shelve_vars_to_file(locals(), dir(), perf_data_path)
# Sim loop
per_scene_duration = []
per_scene_advection_duration = []
per_scene_solve_duration = []
print("Starting sim")
for i in trange(num_scenes, desc='scenes'):
freq_measure = []
flags.clear()
vel.clear()
density.clear()
pressure.clear()
fractions.clear()
phiWalls.clear()
phiObs.clear()
if upres:
flags_upres.clear()
density_upres.clear()
phiObs_upres.clear()
def init_flag(flag_grid, phiWalls_grid, phiObs_grid, fractions_grid, solver, solver_res):
obs_radius = solver_res.x * float(args.obs_radius)
inflow_radius = obs_radius * 1.3 # slightly larger
flag_grid.initDomain(inflow="xX", phiWalls=phiWalls_grid, boundaryWidth=0)
obstacle = Cylinder( parent=solver, center=solver_res*vec3(0.25,0.5,0.5), radius=obs_radius, z=solver_res*vec3(0, 0, 1.0))
phiObs_grid.join(obstacle.computeLevelset())
# slightly larger copy for density source
inflow_p0 = vec3(0.24 * solver_res.x, 0.5*solver_res.y + obs_radius, 0.0*solver_res.z)
inflow_p1 = vec3(0.27 * solver_res.x, 0.5*solver_res.y + inflow_radius, 1.0*solver_res.z)
densInflow0 = Box( parent=solver, p0=inflow_p0, p1=inflow_p1) # density inflow strip above the obstacle
inflow_p0 = vec3(0.24 * solver_res.x, 0.5*solver_res.y - inflow_radius, 0.0*solver_res.z)
inflow_p1 = vec3(0.27 * solver_res.x, 0.5*solver_res.y - obs_radius, 1.0*solver_res.z)
densInflow1 = Box( parent=solver, p0=inflow_p0, p1=inflow_p1) # density inflow strip below the obstacle
phiObs_grid.join(phiWalls_grid)
updateFractions( flags=flag_grid, phiObs=phiObs_grid, fractions=fractions_grid)
setObstacleFlags(flags=flag_grid, phiObs=phiObs_grid, fractions=fractions_grid)
flag_grid.fillGrid()
return densInflow0, densInflow1
densInflow0, densInflow1 = init_flag(flags, phiWalls, phiObs, fractions, s, gs)
if upres:
densInflow0_upres, densInflow1_upres = init_flag(flags_upres, phiWalls_upres, phiObs_upres, fractions_upres, s_upres, gs_upres)
# random
t_end = num_frames + warmup_list[i] if randomized_warmup_steps else num_frames
nq = deque([-1] * t_end, t_end)
# Setup fields
velInflow = vec3(nx_list[i][0], 0, 0)
vel.setConst(velInflow)
# compute Reynolds nr
Re = 0.0
if visc>0.0:
Re = ((velInflow.x/res_max) * worldScale * float(args.obs_radius) * 2.0) / visc
print("Reynolds number: {}".format(Re))
if not os.path.exists(log_dir % i):
os.makedirs(log_dir % i)
open("{}/Re_{}".format(log_dir % i, Re), "w")
# optionally randomize y component
if 1:
noiseField = s.create(NoiseField, loadFromFile=True)
noiseField.posScale = vec3(75)
noiseField.clamp = True
noiseField.clampNeg = -1.
noiseField.clampPos = 1.
testall = s.create(RealGrid); testall.setConst(-1.)
addNoise(flags=flags, density=density, noise=noiseField, sdf=testall, scale=0.1 )
setComponent(target=vel, source=density, component=1)
density.setConst(0.)
# load fields from simulation
if load_warmup_from_disk:
print("Loading warmup step {}...".format(warmup_list[i]))
t_start = warmup_list[i] - prediction_window
load_sim_path = simulation_path + "%06d/"
v_tmp = load_velocity(load_sim_path % i, t_start-1, field_path_format)
d_tmp = load_density(load_sim_path % i, t_start-1, field_path_format)
copyArrayToGridMAC(v_tmp, vel)
copyArrayToGridReal(d_tmp, density)
del v_tmp, d_tmp
else:
t_start = 0
# frame loop
per_frame_advection_duration = []
per_frame_solve_duration = []
for t in tqdm(range(t_start, t_end), desc='sim', leave=False):
start = timer()
nx = nx_list[i][t]
nq.append(nx)
densInflow0.applyToGrid( grid=density, value=1. )
densInflow1.applyToGrid( grid=density, value=1. )
if upres:
densInflow0_upres.applyToGrid(grid=density_upres, value=1.)
densInflow1_upres.applyToGrid(grid=density_upres, value=1.)
advectSemiLagrange(flags=flags, vel=vel, grid=density, order=density_adv_order)
advectSemiLagrange(flags=flags, vel=vel, grid=vel , order=2)
if upres:
zoom_mask = [2.0 if res_z > 1 else 1.0, 2.0, 2.0, 1.0]
np_vec_temp = np.zeros([res_z,res_y,res_x,3], dtype=np.float32)
copyGridToArrayVec3(vel, np_vec_temp)
np_zoomed = ndimage.zoom(np_vec_temp, zoom_mask) * 2.0
copyArrayToGridVec3(np_zoomed, vel_upres)
advectSemiLagrange(flags=flags_upres, vel=vel_upres, grid=density_upres, order=2) # use order 2 instead of 1 (as in low res)
end = timer()
if t > warmup_list[i]:
per_frame_advection_duration.append(end-start)
start = timer()
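# decode a latent-space frame back into the velocity grid (and, for vel_den_prediction, the density grid)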
def decode(cur_ls_frame):
# decode (ae)
if classic_ae:
np_pred_v = rec_pred.ae_v._decoder.predict(x=cur_ls_frame[...,:rec_pred.z_num_vel], batch_size=1)
np_pred_d = rec_pred.ae_d._decoder.predict(x=cur_ls_frame[...,rec_pred.z_num_vel:], batch_size=1)
np_pred = np.concatenate([np_pred_v,np_pred_d],axis=-1)
else:
np_pred = rec_pred.ae._decoder.predict(x=cur_ls_frame, batch_size=1)
# velocity
if res_z > 1:
np_vel = np_pred[:,:,:,:,:3] * normalization_factor_v
else:
np_vel = np_pred[:,:,:,:2] * normalization_factor_v
# Similar to preprocessing of training data, mirror y
if res_z > 1:
np_vel = np_vel[:,:,::-1]
else:
np_vel = np_vel[:,::-1]
# reshape
if res_z > 1:
np_vel = np_vel[0] # remove batch dim
else:
in_shape = np_pred.shape
np_tmp_make3d = np.zeros(list(in_shape)[:-1] + [1])
np_vel = np.concatenate([np_vel, np_tmp_make3d], axis=-1)
# store in grid
copyArrayToGridMAC(np_vel, vel)
# density
if (prediction_type == "vel_den_prediction") and "density" in config.data_type: # or prediction_type == "enc_dec"
if res_z > 1:
np_den = (np_pred[:,:,:,:,-1] + 1.0) * 0.5
else:
np_den = (np_pred[:,:,:,-1] + 1.0) * 0.5
np_den = np.expand_dims(np_den, -1)
if res_z > 1:
np_den = np_den[0] # remove batch dim
# Similar to preprocessing of training data, mirror y
np_den = np_den[:,::-1]
copyArrayToGridReal(np_den, density)
# Solve or Prediction
if t < warmup_list[i] or prediction_type == "simulation" or prediction_type == "enc_dec" or prediction_type == "enc_only":
# vel diffusion / viscosity!
if visc > 0.0:
# diffusion param for solve = const * dt / dx^2
alphaV = visc * s.timestep * float(res_max * res_max)
#mantaMsg("Viscosity: %f , alpha=%f , Re=%f " %(visc, alphaV, Re), 0 )
setWallBcs(flags=flags, vel=vel)
cgSolveDiffusion( flags, vel, alphaV )
if(boundary_cond_order == 1):
setWallBcs(flags=flags, vel=vel)
else:
extrapolateMACSimple( flags=flags, vel=vel, distance=2 , intoObs=True)
setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
setInflowBcs(vel=vel,dir='xX',value=velInflow)
solvePressure( flags=flags, vel=vel, pressure=pressure, fractions=fractions, cgAccuracy=cgAcc, cgMaxIterFac=cgIter)
if(boundary_cond_order == 1):
setWallBcs(flags=flags, vel=vel)
else:
extrapolateMACSimple( flags=flags, vel=vel, distance=5 , intoObs=True)
setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
setInflowBcs(vel=vel,dir='xX',value=velInflow)
if not prediction_type == "simulation":
copyGridToArrayMAC(target=v_, source=vel)
copyGridToArrayReal(target=d_, source=density)
if res_z > 1:
input_arr = v_[:,:,:,:3] / normalization_factor_v
else:
input_arr = v_[:,:,:,:2] / normalization_factor_v
if "density" in config.data_type:
input_arr = np.concatenate([input_arr, d_ * 2.0 - 1.0], axis=-1)
# Similar to preprocessing of training data
input_arr = input_arr[:,::-1]
if res_z > 1:
input_arr = np.expand_dims(input_arr, 0) # add batch dimension...
if classic_ae:
if res_z > 1:
velo_dim = 3
else:
velo_dim = 2
enc_v_part = rec_pred.ae_v._encoder.predict(input_arr[...,:velo_dim], batch_size=1)
enc_d_part = rec_pred.ae_d._encoder.predict(input_arr[...,velo_dim:], batch_size=1)
enc_v = np.concatenate([enc_v_part,enc_d_part],axis=-1)
else:
enc_v = rec_pred.ae._encoder.predict(input_arr, batch_size=1)
if prediction_type == "enc_only":
store_latentspace(enc_v[0], log_dir % i, t, nx, field_path_format)
# Supervised entry
if ls_supervision:
if classic_ae:
enc_v[0, rec_pred.z_num_vel-1] = nx
enc_v[0, -1] = nx
else:
enc_v[0, -1] = nx
prediction_history.add_simulation(enc_v[0])
if t >= warmup_list[i] and prediction_type == "enc_dec":
decode(enc_v)
else:
# ~~ Start of Prediction
if prediction_type == "vel_prediction" and "density" in config.data_type:
# overwrite density part of history with current density
# 1) encode current density d0 (with zero vel components)
copyGridToArrayMAC(target=v_, source=vel) # added on 05.11... otherwise old v is used
copyGridToArrayReal(target=d_, source=density)
if res_z > 1:
input_arr = v_[:,:,:,:3] / normalization_factor_v
else:
input_arr = v_[:,:,:,:2] / normalization_factor_v
input_arr = np.concatenate([input_arr, d_ * 2.0 - 1.0], axis=-1)
# Similar to preprocessing of training data
input_arr = input_arr[:,::-1]
if res_z > 1:
input_arr = np.expand_dims(input_arr, 0) # add batch dimension...
if classic_ae:
if res_z > 1:
velo_dim = 3
else:
velo_dim = 2
enc_d = rec_pred.ae_d._encoder.predict(input_arr[...,velo_dim:], batch_size=1)
# Keep supervised param
if ls_supervision:
prediction_history.simulation_history[0, -1, rec_pred.z_num_vel:-sup_param_count] = enc_d[0, 0:-sup_param_count]
else:
prediction_history.simulation_history[0, -1, rec_pred.z_num_vel:] = enc_d[0, 0:]
else:
enc_d = rec_pred.ae._encoder.predict(input_arr, batch_size=1)
# 2) replace density part of sim history (maybe overwrite "wrong" vel parts with zero)
enc_d[0, :rec_pred.ls_split_idx] = 0.0 # overwrite velo components
# Keep supervised param
if ls_supervision:
prediction_history.simulation_history[0, -1, rec_pred.ls_split_idx:-sup_param_count] = enc_d[0, rec_pred.ls_split_idx:-sup_param_count]
else:
prediction_history.simulation_history[0, -1, rec_pred.ls_split_idx:] = enc_d[0, rec_pred.ls_split_idx:]
X = prediction_history.get()
# predict new field
input_shape = X.shape # e.g. (1, 16, 1, 1, 1, 2048)
X = X.reshape(*X.shape[0:2], -1) # e.g. (1, 16, 2048)
pred_delta_z = pred.model.predict(X, batch_size=X.shape[0])
cur_pred = X[0, -1] + pred_delta_z
# supervised entries
if ls_supervision:
cur_pred[0,-1,-1] = nx
# add to history
prediction_history.add_prediction(cur_pred[0])
# decode (ae)
decode(cur_pred[0])
# ~~ End of Prediction
if not profile:
# Store to disk
copyGridToArrayMAC(target=v_, source=vel)
copyGridToArrayReal(target=d_, source=density)
if res_z > 1 and output_uni:
store_density_blender(density_upres if upres else density, log_dir % i, t, density_blender=density_blender, density_blender_cubic=density_blender_cubic)
store_velocity(v_, log_dir % i, t, list(nq), field_path_format)
store_density(d_, log_dir % i, t, list(nq), field_path_format)
if t > warmup_list[i]:
# freq measure
y_coord = int(freq_y_coord * v_.shape[1])
x_coord = int(freq_x_coord * v_.shape[2])
# store only y direction
freq_measure.append(float(v_[0, y_coord, x_coord, 1]))
end = timer()
if t > warmup_list[i]:
per_frame_solve_duration.append(end-start)
s.step()
if not profile and output_images:
screenshot(gui, log_dir % i, t, density=density_upres if upres else density, scale=2.0)
if not profile and output_images:
convert_sequence( os.path.join(log_dir % i, 'screenshots'), output_name="%06d" % i, file_format="%06d.jpg" if gui else "%06d.ppm", delete_images=not dont_delete_images )
per_scene_advection_duration.append(np.array(per_frame_advection_duration))
per_scene_solve_duration.append(np.array(per_frame_solve_duration))
per_scene_duration.append(np.array(per_frame_advection_duration) + np.array(per_frame_solve_duration))
# write freq measure to disk
np_freq_measure = np.array(freq_measure)
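# estimate the vortex-shedding frequency from the probe signal: smooth it, locate the local maxima, and divide the number of periods by the elapsed time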
# smooth function
N = 20
np_freq_measure_smooth = np.convolve(np_freq_measure, np.ones((N,))/N, mode='valid')
# for local maxima
freq_arg_maxima = argrelextrema(np_freq_measure_smooth, np.greater)
mask = np.ones_like(np_freq_measure,dtype=bool)
mask[freq_arg_maxima[0]] = False
np_freq_measure[mask] = 0
np_freq_measure[~mask] = 1
np_freq_measure = np.trim_zeros(np_freq_measure)
delta_N = np.sum(np_freq_measure) - 1
delta_t = len(np_freq_measure) * s.timestep
f = delta_N / delta_t
# store to disk
freq_dict = {}
freq_dict["frequency"] = float(f)
freq_dict["delta_N"] = float(delta_N)
freq_dict["delta_t"] = float(delta_t)
freq_dict["freq_measure"] = freq_measure
freq_data_path = os.path.join(log_dir % i, "frequency_{}.json".format(i))
with open( freq_data_path, 'w') as f:
json.dump(freq_dict, f, indent=4)
# plot to disk
plt.plot(np_freq_measure)
plt.ylabel('local_maximum')
plt.grid()
plt.savefig(os.path.join(log_dir % i, "frequency_{}.png".format(i)))
plt.clf()
sim_id += 1
gc.collect()
profile_dict = {}
profile_dict["model_name"] = model_name
profile_dict["per_scene_timings"] = [a.tolist() for a in per_scene_duration]
profile_dict["mean_timings_all"] = np.mean(np.array(per_scene_duration))
profile_dict["mean_timings_advection"] = np.mean(np.array(per_scene_advection_duration))
profile_dict["mean_timings_solve"] = np.mean(np.array(per_scene_solve_duration))
perf_data_path_json = os.path.join(perf_data_path, "perf_%06d.json")
perf_data_count = 0
while os.path.isfile(perf_data_path_json % perf_data_count):
perf_data_count += 1
with open( perf_data_path_json % perf_data_count, 'w') as f:
json.dump(profile_dict, f, indent=4)
print('Done')
if __name__ == '__main__':
main()
|
1647713
|
from pyjackson import deserialize, serialize
from pyjackson.decorators import type_field
@type_field('type_alias')
class Parent:
type_alias = 'parent' # also could be None for abstract parents
class Child1(Parent):
type_alias = 'child1'
def __init__(self, a: int):
self.a = a
class Child2(Parent):
type_alias = 'child2'
def __init__(self, b: str):
self.b = b
serialize(Child1(1), Parent) # {'type_alias': 'child1', 'a': 1}
deserialize({'type_alias': 'child2', 'b': 'b'}, Parent) # Child2('b')
|
1647727
|
import random
import numpy as np
import os.path as osp
import pickle as pkl
import torch
from torch_geometric.data import InMemoryDataset, Data
class BA3Motif(InMemoryDataset):
splits = ['training', 'evaluation', 'testing']
def __init__(self, root, mode='testing', transform=None, pre_transform=None, pre_filter=None):
assert mode in self.splits
self.mode = mode
super(BA3Motif, self).__init__(root, transform, pre_transform, pre_filter)
idx = self.processed_file_names.index('{}.pt'.format(mode))
self.data, self.slices = torch.load(self.processed_paths[idx])
@property
def raw_file_names(self):
return ['BA-3motif.npy']
@property
def processed_file_names(self):
return ['training.pt', 'evaluation.pt', 'testing.pt']
def download(self):
if not osp.exists(osp.join(self.raw_dir, 'BA-3motif.npy')):
print("raw data `BA-3motif.npy` doesn't exist, please re-download it from our github.")
raise FileNotFoundError
def process(self):
edge_index_list, label_list, ground_truth_list, role_id_list, pos = np.load(osp.join(self.raw_dir, self.raw_file_names[0]), allow_pickle=True)
data_list = []
alpha = 0.25
for idx, (edge_index, y, ground_truth, z, p) in enumerate(zip(edge_index_list, label_list, ground_truth_list, role_id_list, pos)):
edge_index = torch.from_numpy(edge_index).long()
node_idx = torch.unique(edge_index)
assert node_idx.max() == node_idx.size(0) - 1
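# node features: a one-hot encoding of the role id, softened by blending with uniform noise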
x = torch.zeros(node_idx.size(0), 4)
index = [i for i in range(node_idx.size(0))]
x[index, z] = 1
x = alpha * x + (1 - alpha) * torch.rand((node_idx.size(0), 4))
edge_attr = torch.ones(edge_index.size(1), 1)
y = torch.tensor(y, dtype=torch.long).unsqueeze(dim=0)
# fix bug for torch > 1.6
p = np.array(list(p.values()))
data = Data(x=x, y=y, z=z,
edge_index=edge_index,
edge_attr=edge_attr,
pos = p,
ground_truth_mask=ground_truth,
name=f'BA-3motif{idx}', idx=idx)
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
data_list.append(data)
random.shuffle(data_list)
torch.save(self.collate(data_list[800:]), self.processed_paths[0])
torch.save(self.collate(data_list[400:800]), self.processed_paths[1])
torch.save(self.collate(data_list[:400]), self.processed_paths[2])
|
1647748
|
from fever.scorer import evidence_macro_recall, evidence_macro_precision, fever_score
import unittest
class MaxEvidenceTestCase(unittest.TestCase):
def test_recall_partial_predictions_same_groups_zero_score(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
p,h = evidence_macro_recall(instance,max_evidence=1)
self.assertEqual(p,0)
def test_recall_partial_predictions_same_groups_one_score(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
p,h = evidence_macro_recall(instance,max_evidence=2)
self.assertEqual(p,1)
def test_precision_partial_predictions_same_groups_zero_score(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",2]]],"predicted_evidence":[["page",0],["page", 1]]}
p,h = evidence_macro_precision(instance,max_evidence=1)
self.assertEqual(p,1)
def test_precision_partial_predictions_same_groups_one_score(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",2]]],"predicted_evidence":[["page",0],["page", 1]]}
p,h = evidence_macro_precision(instance,max_evidence=2)
self.assertEqual(p,0.5)
def test_strict_partial_one(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
strict,_,_,_,_ = fever_score([instance],max_evidence=2)
self.assertEqual(strict,1)
def test_strict_partial_zero(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
strict,_,_,_,_ = fever_score([instance],max_evidence=1)
self.assertEqual(strict,0)
def test_global_precision_partial_two_sents(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",2]]],"predicted_evidence":[["page",0],["page", 1]]}
_,_,p,_,_ = fever_score([instance],max_evidence=2)
self.assertEqual(p,0.5)
def test_global_precision_partial_one_sent(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",2]]],"predicted_evidence":[["page",0],["page", 1]]}
_,_,p,_,_ = fever_score([instance],max_evidence=1)
self.assertEqual(p,1)
def test_global_recall_partial_two_sents(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
_,_,_,r,_ = fever_score([instance],max_evidence=2)
self.assertEqual(r,1)
def test_global_recall_partial_one_sent(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
_,_,_,r,_ = fever_score([instance],max_evidence=1)
self.assertEqual(r,0)
def test_non_modification(self):
instance = {"label": "supports", "predicted_label": "supports","evidence":[[[None,None,"page",0],[None,None,"page",1]]],"predicted_evidence":[["page",0],["page", 1]]}
instance_copy = instance.copy()
_,_,_,_,_ = fever_score([instance],max_evidence=0)
self.assertEqual(instance_copy,instance)
|
1647845
|
from python_pachyderm import Client as _Client
from .mixin.admin import AdminMixin
from .mixin.auth import AuthMixin
from .mixin.debug import DebugMixin
from .mixin.enterprise import EnterpriseMixin
from .mixin.health import HealthMixin
from .mixin.identity import IdentityMixin
from .mixin.license import LicenseMixin
from .mixin.pfs import PFSMixin
from .mixin.pps import PPSMixin
from .mixin.transaction import TransactionMixin
from .mixin.version import VersionMixin
class ExperimentalClient(
AdminMixin,
AuthMixin,
DebugMixin,
EnterpriseMixin,
HealthMixin,
IdentityMixin,
LicenseMixin,
PFSMixin,
PPSMixin,
TransactionMixin,
VersionMixin,
_Client,
):
def __init__(
self,
host: str = None,
port: int = None,
auth_token: str = None,
root_certs: bytes = None,
transaction_id: str = None,
tls: bool = None,
use_default_host: bool = True,
):
_Client.__init__(
self,
host,
port,
auth_token,
root_certs,
transaction_id,
tls,
use_default_host,
)
|
1647864
|
from .poly import FixedPoly
import numpy as np
import pymunk as pm
from .gravity_obj import MOVING_OBJ_COLLISION_TYPE
from .img_tool import ImageTool
from .funnel import dist
import pygame as pg
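# pre-solve collision handler: a moving object that ends up between the bucket's near vertices is snapped to the bucket center, its motion is zeroed, it is marked as in_bucket, and further collision response is suppressed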
def bucket_touching_handler(arbiter, space, data):
locations = arbiter.shapes[1].locations
if arbiter.shapes[0].in_bucket:
arbiter.shapes[0].body.position = locations[0][:]
arbiter.shapes[0].body.velocity = pm.Vec2d(0., 0.)
arbiter.shapes[0].body.force = pm.Vec2d(0., 0.)
return False
elif arbiter.shapes[0].collision_type == MOVING_OBJ_COLLISION_TYPE:
d1 = dist(arbiter.shapes[0].body.position, locations[1])
d2 = dist(arbiter.shapes[0].body.position, locations[2])
d3 = dist(arbiter.shapes[0].body.position, locations[3])
d4 = dist(arbiter.shapes[0].body.position, locations[4])
if d1 < d2 and d1 < d3 and d4 < d2 and d4 < d3:
new_pos = locations[0]
obj = arbiter.shapes[0]
obj.body.position = new_pos[:]
obj.body.velocity = pm.Vec2d(0., 0.)
obj.body.angular_velocity = 0.
obj.body.force = pm.Vec2d(0., 0.)
obj.body.torque = 0.
obj.body.angle = 0.
obj.in_bucket = True
return False
else:
return True
return True
class Bucket(FixedPoly):
# 1, 5 pi
def __init__(self, pos, angle = np.pi/4, size=10.0, color='black'):
super().__init__(pos, n_sides=4, angle=(np.pi/4 + angle), size=size, color=color)
self.color = color
self.center_position = [self.pos[0], self.pos[1]]
self.v_1 = np.array(self.pos) + np.array(self.vertices[0])
self.v_2 = np.array(self.pos) + np.array(self.vertices[1])
self.v_3 = np.array(self.pos) + np.array(self.vertices[2])
self.v_4 = np.array(self.pos) + np.array(self.vertices[3])
self.img = ImageTool('bucket.png', 0.0 + angle, pos[:],
use_shape=self.shape,
debug_render=False)
self.collision_type = 6
def add_to_space(self, space):
bucket = self.img.get_shape()
bucket.collision_type = self.collision_type
bucket.locations = [self.center_position, self.v_1, self.v_2, self.v_3, self.v_4]
self.shape = bucket
space.add(bucket)
self.attached_shapes.append(bucket)
# Called when 1 (movable objects) collides with 3 (bucket)
h = space.add_collision_handler(1, self.collision_type)
h.pre_solve = bucket_touching_handler
def render(self, screen, scale=None, anti_alias=False):
if scale is None:
scale = 1
self.img.render(screen, scale, self.flipy)
|
1647866
|
import argparse, json
import boto3
from jinja2 import Environment, FileSystemLoader
"""
A bunch of free functions that we use in all scripts.
"""
def get_jinja_env(config):
"""
Get a jinja2 Environment object that we can use to find templates.
"""
return Environment(loader=FileSystemLoader('.'))
def json_file(filename):
with open(filename, 'r') as f:
return json.load(f)
def get_parent_parser():
"""
Get an argparse parser with arguments that are always needed
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--prod', action='store_false', dest='sandbox',
default=True,
help="Whether to run on the production AMT site.")
parser.add_argument('--hit_ids_file')
parser.add_argument('--config', default='config.json',
type=json_file)
return parser
def get_mturk_connection_from_args(args):
"""
Utility method to get an MTurkConnection from argparse args.
"""
aws_access_key = args.config.get('aws_access_key')
aws_secret_key = args.config.get('aws_secret_key')
return get_mturk_connection(sandbox=args.sandbox,
aws_access_key=aws_access_key,
aws_secret_key=aws_secret_key)
def get_mturk_connection(sandbox=True,
aws_access_key=None,
aws_secret_key=None,
region_name='us-east-1'):
"""
Get a boto mturk connection. This is a thin wrapper over boto3.client;
the only difference is a boolean flag to indicate sandbox or not.
"""
kwargs = {}
# boto3 requires a region to create a client. If your ~/.aws/config
# has a default region other than us-east-1, it throws an error.
# Since the MTurk endpoint only exists in us-east-1, there is no point
# in asking users to provide it. See #29
kwargs['region_name'] = region_name
if aws_access_key is not None:
kwargs['aws_access_key_id'] = aws_access_key
if aws_secret_key is not None:
kwargs['aws_secret_access_key'] = aws_secret_key
if sandbox:
host = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
else:
host='https://mturk-requester.us-east-1.amazonaws.com'
return boto3.client('mturk', endpoint_url=host, **kwargs)
def setup_qualifications(hit_properties, mtc):
"""
Replace some of the human-readable keys from the raw HIT properties
JSON data structure with boto-specific objects.
"""
qual = []
if 'QualificationId' in hit_properties and 'QualificationComparator' in hit_properties and 'QualificationInteger' in hit_properties:
comparator = hit_properties['QualificationComparator']
if comparator == '>':
c = 'GreaterThan'
elif comparator == '=':
c = 'EqualTo'
elif comparator == '<':
c = 'LessThan'
else:
print("The 'qualification comparator' is not one of the designated values ('<', '=', '>').")
qual.append({
'QualificationTypeId': hit_properties['QualificationId'],
'Comparator': c,
'IntegerValues': [int(hit_properties['QualificationInteger'])],
'RequiredToPreview': False,
})
del hit_properties['QualificationId']
del hit_properties['QualificationComparator']
del hit_properties['QualificationInteger']
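# built-in MTurk locale qualification: restrict workers by country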
if 'Country' in hit_properties:
qual.append({
'QualificationTypeId': '00000000000000000071',
'Comparator': 'In',
'LocaleValues': [{'Country': country} for country in hit_properties['Country']],
})
del hit_properties['Country']
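# built-in MTurk qualification: minimum number of approved HITs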
if 'HitsApproved' in hit_properties:
qual.append({
'QualificationTypeId': '00000000000000000040',
'Comparator': 'GreaterThan',
'IntegerValues': [hit_properties['HitsApproved']],
})
del hit_properties['HitsApproved']
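# built-in MTurk qualification: minimum percentage of approved assignments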
if 'PercentApproved' in hit_properties:
qual.append({
'QualificationTypeId': '000000000000000000L0',
'Comparator': 'GreaterThan',
'IntegerValues': [hit_properties['PercentApproved']],
})
del hit_properties['PercentApproved']
hit_properties['QualificationRequirements'] = qual
|
1647945
|
import os
import test_utils
EXAMPLES_ROOT = test_utils.EXAMPLES_ROOT
def test_1():
root_dir = os.path.join(EXAMPLES_ROOT, 'mnist')
output_evaluator = test_utils.TemplateOutputEvaluator(
b'''\
Device: @numpy
# unit: 10
# Minibatch-size: 100
# epoch: 1
epoch main/loss validation/main/loss main/accuracy validation/main/accuracy elapsed_time
0 {b0 } {d0 } {e0 }
1 {a1 } {b1 } {c1 } {d1 } {e1 }
''', # NOQA
b0=(float, lambda x: 0.0 < x),
d0=(float, lambda x: 0.00 <= x <= 1.00),
e0=(float, lambda x: 0. < x < 100.),
a1=(float, lambda x: 0.6 < x < 1.5),
b1=(float, lambda x: 0.3 < x < 0.6),
c1=(float, lambda x: 0.62 < x < 0.82),
d1=(float, lambda x: 0.83 < x < 0.98),
e1=(float, lambda x: 0. < x < 100.),
)
with test_utils.ExampleRunner(root_dir) as r:
r.run(
'train_mnist.py',
[
'--epoch', '1',
'--unit', '10',
],
output_evaluator=output_evaluator)
|
1647974
|
from LucidDynamodb import DynamoDb
from LucidDynamodb.exceptions import (
TableNotFound
)
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
try:
db = DynamoDb()
db.delete_table(table_name='dev_jobs')
logging.info("Table deleted successfully")
table_names = db.read_all_table_names()
logging.info(f"Table names: {table_names}")
except TableNotFound as e:
logging.error(f"Table delete operation failed {e}")
"""
dineshsonachalam@macbook examples % python 14-delete-a-table.py
INFO:botocore.credentials:Found credentials in environment variables.
INFO:root:Table deleted successfully
INFO:root:Table names: ['CertMagic', 'dev_test', 'kp-config-v1', 'test-1']
"""
|
1647985
|
from test.base import BaseTestCase
class ApiSettingsTest(BaseTestCase):
__URL = '/api/settings'
def setUp(self):
pass
def test_get(self):
response = self.client.get(self.__URL)
self.assertEqual(200, response.status_code)
self.assertIsNotNone(response)
|
1648016
|
from collections import namedtuple
from abc import ABCMeta, abstractmethod
## @brief Interface class to be used by modules that want to receive a notification whenever the date changes
class DateChangeListener:
__metaclass__ = ABCMeta
@abstractmethod
def on_date_change( self, new_date ):
pass
## @brief Interface class to be used by modules that want to receive a notification whenever the year changes
class YearChangeListener:
__metaclass__ = ABCMeta
@abstractmethod
def on_year_change( self, new_year ):
pass
## @brief Provides an interface for listeners who want to be notified every x seconds by the watch
class TimePeriodWatchListener:
__metaclass__ = ABCMeta
@abstractmethod
def on_time_period_update( self, current_time ):
pass
## @brief Provides an interface for listeners who want to be notified at a particular time of the day by the watch
class DailyWatchListener:
__metaclass__ = ABCMeta
@abstractmethod
def on_daily_time_update( self, current_time ):
pass
DailyWatchListenerPair = namedtuple( 'DailyWatchListenerPair', 'secs_since_midnight daily_watch_listener' )
|
1648025
|
import logging
import os.path
from flask import request
from flask_restplus import Resource, fields
from common import api, main
log = logging.getLogger(__name__)
# This collects the API operations into named groups under a root URL.
example_ns = api.namespace('example', description="Example operations")
ExampleObj = api.model('Example', {
'in_str': fields.String(required=True,
description='your str',
example="exs"),
})
@example_ns.route('/<string:in_str>')
class ExampleResource(Resource):
@api.marshal_with(ExampleObj)
def get(self, in_str):
"""Takes in data"""
log.debug("Got parameter: %r", in_str)
log.debug("Got body: %r", request.data)
return {"in_str": in_str}
if __name__ == '__main__':
main(os.path.splitext(os.path.basename(__file__))[0] + '.json')
|
1648083
|
from django.urls import path
from rest_framework.routers import DefaultRouter
from .views import index_template_view, viettel_user_detail_view
from .viewsets import ViettelShakeViewSet, ViettelUserViewSet
app_name = 'shake'
router = DefaultRouter()
router.register(r'shake', ViettelShakeViewSet, basename='shake')
router.register(r'user', ViettelUserViewSet, basename='user')
urlpatterns = [
path('', index_template_view, name='index'),
path('detail/<str:phone>/', viettel_user_detail_view),
]
urlpatterns += router.urls
|
1648133
|
from typing import (
Any,
Awaitable,
Callable,
Iterable,
Optional,
TypeVar,
overload,
Protocol,
)
from .task import from_result, zero
from .util import IDisposable
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
TD = TypeVar("TD", bound=IDisposable)
U = TypeVar("U")
class Delayed(Protocol[T_co]):
def __call__(self, __unit: Optional[None] = None) -> Awaitable[T_co]:
...
class TaskBuilder:
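# Builder that composes awaitables in the style of an F# task computation expression (Bind/Return/While/...)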
def Bind(
self, computation: Awaitable[T], binder: Callable[[T], Awaitable[U]]
) -> Awaitable[U]:
async def bind() -> U:
value = await computation
return await binder(value)
return bind()
def Combine(
self, computation1: Awaitable[None], computation2: Delayed[T]
) -> Awaitable[T]:
return self.Bind(computation1, computation2)
def Delay(self, generator: Callable[[], Awaitable[T]]) -> Delayed[T]:
def deferred(_: Any = None) -> Awaitable[T]:
# print("Delay: deferred: ", generator)
return generator()
return deferred
def For(
self, sequence: Iterable[T], body: Callable[[T], Awaitable[U]]
) -> Awaitable[U]:
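# run body for each element of the sequence, chaining the resulting awaitables through While/Delay so they complete in order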
done = False
it = iter(sequence)
try:
cur = next(it)
except StopIteration:
done = True
def delay():
nonlocal cur, done
res = body(cur)
try:
cur = next(it)
except StopIteration:
done = True
return res
return self.While(lambda: not done, self.Delay(delay))
@overload
def Return(self) -> Awaitable[None]:
...
@overload
def Return(self, value: T) -> Awaitable[T]:
...
def Return(self, value: Optional[T] = None) -> Awaitable[Optional[T]]:
return from_result(value)
def ReturnFrom(self, computation: Awaitable[T]) -> Awaitable[T]:
return computation
def TryFinally(
self, computation: Delayed[T], compensation: Callable[[], None]
) -> Awaitable[T]:
async def try_finally() -> T:
try:
t = await computation()
finally:
compensation()
return t
return try_finally()
def TryWith(
self, computation: Delayed[T], catchHandler: Callable[[Any], Awaitable[T]]
) -> Awaitable[T]:
async def try_with() -> T:
try:
t = await computation()
except Exception as exn:
t = await catchHandler(exn)
return t
return try_with()
def Using(self, resource: TD, binder: Callable[[TD], Awaitable[U]]) -> Awaitable[U]:
return self.TryFinally(
self.Delay(lambda: binder(resource)), lambda: resource.Dispose()
)
@overload
def While(
self, guard: Callable[[], bool], computation: Delayed[None]
) -> Awaitable[None]:
...
@overload
def While(self, guard: Callable[[], bool], computation: Delayed[T]) -> Awaitable[T]: # type: ignore
...
def While(
self, guard: Callable[[], bool], computation: Delayed[Any]
) -> Awaitable[Any]:
if guard():
return self.Bind(computation(), lambda _: self.While(guard, computation))
else:
return self.Return()
def Zero(self) -> Awaitable[None]:
return zero()
def Run(self, computation: Delayed[T]) -> Awaitable[T]:
return computation()
task = TaskBuilder
__all__ = ["task"]
|
1648143
|
import json
import boto
from boto.s3.key import Key
MAX_CACHE_TIME = 30
# Local file settings
DATA_DIR = "prices/price_data/"
SEPARATOR = "_-_"
MAX_PRICES = 129600
# AWS S3 settings
BUCKET_NAME = "acacia-prices"
def prices_get(exchange, pair, num_prices=None, price_ratio=1, cached=True):
filename = get_filename(exchange, pair)
if cached == True:
from django.core.cache import cache
text = cache.get(filename)
else:
text = None
if text == None:
try:
text = get_s3_text(filename)
if cached == True:
cache.set(filename, text, MAX_CACHE_TIME)
except Exception as e:
print(str(e))
return None
result = json.loads(text)
result = result[0::price_ratio]
if num_prices != None:
return result[:num_prices]
else:
return result
def prices_append(exchange, pair, value):
prices = prices_get(exchange, pair, cached=False)
if prices == None:
prices = []
prices = [value] + prices
if len(prices) > MAX_PRICES:
prices = prices[:MAX_PRICES] # list is newest-first, so drop the oldest entries from the tail
filename = get_filename(exchange, pair)
set_s3_from_string(filename, json.dumps(prices))
def get_s3_text(filename):
k = get_s3_key()
k.key = filename
return k.get_contents_as_string()
def set_s3_from_string(filename, data):
k = get_s3_key()
k.key = filename
k.set_contents_from_string(data)
def get_s3_key():
c = boto.connect_s3()
b = c.get_bucket(BUCKET_NAME)
return Key(b)
def get_filename(exchange, pair):
return DATA_DIR + exchange + SEPARATOR + pair + ".json"
|
1648171
|
import keyword
from pathlib import Path
from django.core.management import CommandError
from django.template.loader import get_template
from ._base import BaseGenerateCommand
from ...utils import pascalcase
TEMPLATES = {
"_reflex.py": "sockpuppet/scaffolds/reflex.py",
"_controller.js": "sockpuppet/scaffolds/controller.js",
".js": "sockpuppet/scaffolds/application.js",
".py": "sockpuppet/scaffolds/view.py",
".html": "sockpuppet/scaffolds/template.html",
}
class Command(BaseGenerateCommand):
help = "Scaffold for reflex. Includes javascript and python."
def add_arguments(self, parser):
parser.add_argument(
"app_name",
nargs=1,
type=str,
help="The app where the generated files should be placed",
)
parser.add_argument(
"reflex_name",
nargs="?",
type=str,
help="The name of the reflex and javascript controller",
default="example",
)
parser.add_argument(
"--javascript",
dest="javascript",
action="store_true",
help="Include this to generate a setup than includes javascript with controllers",
)
parser.set_defaults(javascript=False)
def handle(self, *args, **options):
app_name = options["app_name"][0]
reflex_name = options["reflex_name"].lower()
using_javascript = options["javascript"]
if not reflex_name.isidentifier():
raise CommandError(
f"The reflex name ({reflex_name}) must be a valid Python identifier."
)
if reflex_name == "_":
raise CommandError("The reflex name must not be a single underscore.")
if reflex_name in keyword.kwlist:
raise CommandError(
f"The reflex name ({reflex_name}) can't be a Python keyword."
)
module_path = self.lookup_app_path(app_name)
self.module_path = Path(module_path)
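# each entry is (requires-javascript flag, destination sub-directory, suffix used to select the template)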
paths = [
(False, "reflexes", "_reflex.py"),
(True, "javascript", ".js"),
(True, "javascript/controllers", "_controller.js"),
(False, "views", ".py"),
(False, "templates", ".html"),
]
for without_js, path, suffix in paths:
template_name = TEMPLATES[suffix]
template = get_template(template_name)
rendered = template.render(
{
"class_name": pascalcase(reflex_name),
"reflex_name": reflex_name,
"using_javascript": using_javascript,
}
)
if without_js and not using_javascript:
# skipping these templates
continue
self.create_file(path, "{}{}".format(reflex_name, suffix), rendered)
self.create_file("views", "__init__.py", "")
self.create_file("reflexes", "__init__.py", "")
self.call_stdout("Scaffolding generated!", _type="SUCCESS")
if (self.module_path / "views.py").exists():
msg = "We created a views directory which means that you need to move your initial views there"
self.call_stdout("")
self.call_stdout(msg, _type="WARNING")
self.call_stdout("Last step is to add the view to urls.py", _type="SUCCESS")
|
1648177
|
import json
from pathlib import Path
from typing import Any, Dict
from tqdm import tqdm
from pokeapi_ditto.common import apply_base_url
def _is_id(s: str):
try:
int(s)
return True
except ValueError:
return False
def _dump(path: Path, content: Any):
if not path.parent.exists():
path.parent.mkdir(parents=True)
path.write_text(json.dumps(content, sort_keys=True, indent=4))
# TODO: blow all this up and make it good
# this is really bad code and hard to follow
# all this path.parent.parent nonsense is hard to understand
# clone.py is a cleaner model to follow
def do_transform(src_dir: str, dest_dir: str, base_url: str):
src_dir: Path = Path(src_dir)
dest_dir: Path = Path(dest_dir)
if base_url.endswith("/"):
base_url = base_url[:-1]
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
src_paths = src_dir.glob("**/*.json")
for src_path in tqdm(list(src_paths)):
content: Dict = json.loads(apply_base_url(src_path.read_text(), base_url))
# all files
dest_path = dest_dir.joinpath(src_path.relative_to(src_dir))
_dump(dest_path, content)
# named resource files
if _is_id(dest_path.parent.name) and "name" in content:
name = content["name"]
dest_path = dest_path.parent.parent.joinpath(name, "index.json")
_dump(dest_path, content)
# a hack for pokemon/ID/encounters
if (
_is_id(dest_path.parent.parent.name)
and dest_path.parent.name == "encounters"
):
pokemon_path = src_path.parent.parent.joinpath("index.json")
name = json.loads(pokemon_path.read_text())["name"]
dest_path = dest_path.parent.parent.parent.joinpath(
name, "encounters", "index.json"
)
_dump(dest_path, content)
|
1648180
|
import six
import os, errno, logging, inspect
from sqlalchemy.orm.session import object_session
from sqlalchemy.orm.interfaces import MapperProperty
from sqlalchemy.orm.attributes import get_history
from sqlalchemy.orm.util import class_mapper
from sqlalchemy import event
from sqlalchemy.util import set_creation_order
from weakref import WeakKeyDictionary
from iktomi.db.files import TransientFile, PersistentFile
logger = logging.getLogger(__name__)
class FileEventHandlers(object):
def __init__(self, prop):
self.prop = prop
def _get_history(self, target):
return get_history(target, self.prop.attribute_name)
@staticmethod
def _remove_file(path):
try:
os.remove(path)
except OSError as exc:
if exc.errno==errno.ENOENT:
logger.warning("Can't remove file %r: doesn't exist", path)
#raise # XXX
else:
raise # pragma: no cover
def _store_transient(self, target):
transient = getattr(target, self.prop.key)
if transient is None:
for file_attr, target_attr in self.prop.cache_properties.items():
setattr(target, target_attr, None)
return
if isinstance(transient, PersistentFile):
return
assert isinstance(transient, TransientFile), repr(transient)
persistent = self._2persistent(target, transient)
file_attr = getattr(type(target), self.prop.key)
file_attr._states[target] = persistent
for file_attr, target_attr in self.prop.cache_properties.items():
setattr(target, target_attr, getattr(persistent, file_attr))
def _2persistent(self, target, transient):
session = object_session(target)
persistent_name = getattr(target, self.prop.attribute_name).decode('utf-8')
attr = getattr(type(target), self.prop.key)
file_manager = session.find_file_manager(attr)
persistent = file_manager.get_persistent(persistent_name,
self.prop.persistent_cls)
file_attr = getattr(target.__class__, self.prop.key)
file_manager = session.find_file_manager(file_attr)
return file_manager.store(transient, persistent)
def before_insert(self, mapper, connection, target):
self._store_transient(target)
def before_update(self, mapper, connection, target):
changes = self._get_history(target)
if not (changes.deleted or changes.added):
return
if changes.deleted:
old_name = self._get_file_name_to_delete(target, changes)
if old_name:
session = object_session(target)
file_attr = getattr(target.__class__, self.prop.key)
file_manager = session.find_file_manager(file_attr)
old = file_manager.get_persistent(old_name,
self.prop.persistent_cls)
self._remove_file(old.path)
self._store_transient(target)
def _get_file_name_to_delete(self, target, changes):
if changes and changes.deleted:
filename = changes.deleted[0]
if filename is not None:
return filename.decode('utf-8')
def after_delete(self, mapper, connection, target):
changes = self._get_history(target)
old_name = self._get_file_name_to_delete(target, changes)
old_name = old_name or getattr(target, self.prop.attribute_name)
if old_name is not None:
old_name = old_name.decode('utf-8')
session = object_session(target)
file_attr = getattr(target.__class__, self.prop.key)
file_manager = session.find_file_manager(file_attr)
old = file_manager.get_persistent(old_name,
self.prop.persistent_cls)
self._remove_file(old.path)
class FileAttribute(object):
def __init__(self, prop, class_=None):
self.prop = prop
self.column = prop.column
self.attribute_name = prop.attribute_name
self.name_template = prop.name_template
self.class_ = class_
self.cache_properties = prop.cache_properties
self.persistent_cls = prop.persistent_cls
# State for each instance
self._states = WeakKeyDictionary()
def __get__(self, inst, cls=None):
if inst is None:
return self
if inst not in self._states:
# XXX column name may differ from attribute name
# empty string should be considered as None
value = getattr(inst, self.attribute_name) or None
if value is not None:
session = object_session(inst)
if session is None:
raise RuntimeError('Object is detached')
if not hasattr(session, 'file_manager'):
raise RuntimeError(
"Session doesn't support file management")
file_manager = session.find_file_manager(self)
value = file_manager.get_persistent(value.decode('utf-8'),
self.persistent_cls)
for file_attr, target_attr in self.cache_properties.items():
setattr(value, file_attr, getattr(inst, target_attr))
self._states[inst] = value
return self._states[inst]
def __set__(self, inst, value):
if inst in self._states and self._states[inst]==value:
return
# sqlalchemy bug workaround
# To get correct history we should assert that old value has been
# loaded from database. getattr loads lazy attribute.
# See http://www.sqlalchemy.org/trac/ticket/2787
old_name = getattr(inst, self.attribute_name)
self._states[inst] = value
if value is None:
setattr(inst, self.attribute_name, None)
elif isinstance(value, TransientFile):
ext = os.path.splitext(value.name)[1]
# XXX getting manager from a file object
# looks like a hack
name = value.manager.new_file_name(
self.name_template, inst, ext, old_name)
setattr(inst, self.attribute_name, name.encode('utf-8'))
elif isinstance(value, PersistentFile):
setattr(inst, self.attribute_name, value.name.encode('utf-8'))
for file_attr, target_attr in self.cache_properties.items():
setattr(inst, target_attr, getattr(value, file_attr))
else:
raise ValueError('File property value must be TransientFile, '\
'PersistentFile or None')
class FileProperty(MapperProperty):
attribute_cls = FileAttribute
event_cls = FileEventHandlers
def __init__(self, column, name_template, attribute_name=None, **options):
super(FileProperty, self).__init__()
self.column = column
self._attribute_name = attribute_name
self.name_template = name_template
self._set_options(options)
set_creation_order(self)
@property
def attribute_name(self):
return self._attribute_name or self.column.key
def _set_options(self, options):
self.persistent_cls = options.pop('persistent_cls', PersistentFile)
self.cache_properties = dict(options.pop('cache_properties', {}))
assert not options, "Got unexpected parameters: %s" % (
options.keys())
def instrument_class(self, mapper):
handlers = self.event_cls(self)
event.listen(mapper, 'before_insert', handlers.before_insert,
propagate=True)
event.listen(mapper, 'before_update', handlers.before_update,
propagate=True)
event.listen(mapper, 'after_delete', handlers.after_delete,
propagate=True)
setattr(mapper.class_, self.key,
self.attribute_cls(self, mapper.class_))
# XXX Implement merge?
def filesessionmaker(sessionmaker, file_manager, file_managers=None):
u'''Wraps a sessionmaker so that the sessions it creates carry a link
to a FileManager instance::
file_manager = FileManager(cfg.TRANSIENT_ROOT,
cfg.PERSISTENT_ROOT)
filesessionmaker(sessionmaker(...), file_manager)
'''
registry = WeakKeyDictionary()
if file_managers:
for k, v in six.iteritems(file_managers):
if isinstance(k, FileAttribute):
raise NotImplementedError()
registry[k] = v
def find_file_manager(self, target):
if isinstance(target, FileAttribute):
assert hasattr(target, 'class_')
target = target.class_
else:
if not inspect.isclass(target):
target = type(target)
assert hasattr(target, 'metadata')
assert class_mapper(target) is not None
if target in registry:
return registry[target]
if target.metadata in registry:
return registry[target.metadata]
return file_manager
def session_maker(*args, **kwargs):
session = sessionmaker(*args, **kwargs)
# XXX in case we want to use session manager somehow bound
# to request environment. For example, to generate user-specific
# URLs.
#session.file_manager = \
# kwargs.get('file_manager', file_manager)
session.file_manager = file_manager
session.find_file_manager = six.create_bound_method(
find_file_manager,
session)
return session
return session_maker
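# Resolution-order sketch for find_file_manager above (managers and model
# classes are hypothetical): per-class registry entries win over per-metadata
# entries, which win over the default manager.
#
#   default_manager = FileManager('/tmp/transient', '/srv/persistent')
#   photo_manager = FileManager('/tmp/transient', '/srv/photos')
#   Session = filesessionmaker(sessionmaker(bind=engine), default_manager,
#                              file_managers={Photo: photo_manager})
#   session = Session()
#   session.find_file_manager(Photo)     # -> photo_manager
#   session.find_file_manager(Document)  # -> default_manager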
|
1648194
|
import unittest
import rem
import six
from six.moves import cPickle as pickle
class T07(unittest.TestCase):
"""Checking internal REM structures"""
def testTagWrapperSerialization(self):
tag = rem.Tag("test")
wrapOrig = rem.storages.TagWrapper(tag)
wrapDesc = pickle.dumps(wrapOrig)
wrapNew = pickle.loads(wrapDesc)
self.assertTrue(isinstance(wrapNew, rem.storages.TagWrapper))
self.assertEqual(wrapNew.name, wrapOrig.name)
|
1648214
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.template.loader import get_template
from django.template import Context
from django.forms import EmailField
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_unicode
import re
import ldap
import sys
import pytz
from datetime import datetime
from optparse import make_option
import logging
from pprint import pprint as pp
from fum.models import Users, Groups, Projects, Servers
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Check DB<>LDAP sync'
def entries_with_id(self, results, newpk, key):
matches = []
for k in results:
pk = k[1].get(key, None)
if not pk:
continue
pk = pk[0]
if int(newpk)==int(pk):
matches.append(k)
return matches
def sync(self, matches):
for instance, fields in matches.iteritems():
for field in fields:
curval = getattr(instance, field)
l = instance.ldap
mlist = l.get_modify_modlist(l.as_ldap_value(field, curval), force_update=True)
l.op_modify(l.dn, mlist)
def deep(self, queryset, exclude=[]):
M = {'matches':{},'exclusions':{}}
        def compare(instance, dbval, ldap, ldapval):
            lval = ldap.get(ldapval, [''])
            ival = getattr(instance, dbval)
            ival = unicode(ival).encode('utf-8') if (ival is not None) else ''
            return ival == lval[0]
def store(instance, key):
slot = 'matches'
if key in exclude:
slot = 'exclusions'
M[slot].setdefault(instance, [])
M[slot][instance].append(key)
for instance in queryset:
ul = instance.lval()
for mf,lf in instance.ldap_fields.iteritems():
if isinstance(lf, list):
for ldap_attr in lf:
if not compare(instance, mf, ul, ldap_attr):
store(instance, mf)
else:
if not compare(instance, mf, ul, lf):
store(instance, mf)
return M
def roam(self, results, key):
print "----------------------"
duplicate = []
broken = []
ids = set()
for k in results:
pk = k[1].get(key, None)
if not pk:
broken.append(k)
continue
            # skip ou=Luola (obsolete)
            if "ou=Luola" in k[0]:
                continue
pk = pk[0]
if pk not in ids:
ids.add(pk)
else:
duplicate.append(k)
for k in duplicate:
pp(self.entries_with_id(results, k[1][key][0], key))
return dict(duplicate=duplicate, broken=broken)
def exists_in_db(self, results, field='name'):
result = dict(yes=[], no=[], error=[])
for k in results:
hostpath = k[0]
cn = [j for i,j in tuple([h.split('=') for h in hostpath.split(',')]) if i in ['cn','uid']]
model = self.get_ldap_group_model(hostpath)
            if cn:
                try:
                    model.objects.get(**{field: cn[0]})
                except Exception, e:
                    result['no'].append(k)
                else:
                    result['yes'].append(k)
            else:
                result['error'].append(k)
return result
    def get_ldap_group_model(self, cn):
        model = None
        if "ou=Groups" in cn:
if "ou=Projects" in cn:
model = Projects
elif "ou=Hosts" in cn:
model = Servers
else:
model = Groups
if "ou=People" in cn:
model = Users
if not model:
raise Exception("No model found", cn)
return model
def handle(self, *args, **options):
print "Data in FUM, that is *NOT* in LDAP"
missing = {'fum-ldap': {}, 'ldap-fum': {}}
for h in [Users, Groups, Projects, Servers]:
missing['fum-ldap'].setdefault(h, [])
missing['ldap-fum'].setdefault(h, [])
for i in h.objects.all().order_by('pk'):
try:
i.lval()
except Exception, e:
missing['fum-ldap'][h].append(i)
pp(missing['fum-ldap'])
print "========================="
print "Test gidNumber existence, uniqueness"
gids = {}
u = Users()
# gidNumber = 2000 for all, uidNumber is unique
results = u.ldap.fetch(u.ldap_base_dn, scope=ldap.SCOPE_SUBTREE, filters=u.ldap_filter, attrs=['gidNumber','uidNumber'])
rs = self.roam(results, 'uidNumber')
gids[Users] = rs
missing['ldap-fum'][Users] = self.exists_in_db(results, field='username')
for o in [Groups]: # Groups holds ou=Hosts, ou=Projects within
print o
u = o()
results = u.ldap.fetch(u.ldap_base_dn, scope=ldap.SCOPE_SUBTREE, filters=u.ldap_filter, attrs=['gidNumber'])
rs = self.roam(results, 'gidNumber')
gids[o] = rs
missing['ldap-fum'][o] = self.exists_in_db(results, field='name')
pp(gids)
print "========================="
print "Data in LDAP, that is *NOT* in FUM"
pp(missing['ldap-fum'])
|
1648282
|
from flask import redirect, session, url_for
from functools import wraps
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
return redirect(url_for('user.login_route'))
return wrap
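# Usage sketch (route and template names are hypothetical):
#   @app.route('/dashboard')
#   @login_required
#   def dashboard():
#       return render_template('dashboard.html')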
|
1648305
|
import unittest
import numpy as np
from dacbench import AbstractEnv
from dacbench.benchmarks import CMAESBenchmark
from dacbench.wrappers import ObservationWrapper
class TestObservationTrackingWrapper(unittest.TestCase):
def get_test_env(self) -> AbstractEnv:
bench = CMAESBenchmark()
env = bench.get_benchmark(seed=42)
return env
def test_flatten(self):
wrapped_env = ObservationWrapper(self.get_test_env())
d = {"b": 0, "a": np.array([0, 1.4, 3])}
flat = wrapped_env.flatten(d)
expected = np.array([0, 1.4, 3, 0])
np.testing.assert_array_almost_equal(flat, expected)
def test_conversion_wrapper(self):
action = 0.2
env = self.get_test_env()
reset_state_env = env.reset()
step_state_env, *rest_env = env.step(action)
self.assertIsInstance(reset_state_env, dict)
wrapped_env = ObservationWrapper(self.get_test_env())
reset_state_wrapped = wrapped_env.reset()
        step_state_wrapped, *rest_wrapped = wrapped_env.step(action)
self.assertIsInstance(reset_state_wrapped, np.ndarray)
        self.assertListEqual(rest_env, rest_wrapped)
np.testing.assert_array_equal(
wrapped_env.flatten(reset_state_env), reset_state_wrapped
)
np.testing.assert_array_equal(
wrapped_env.flatten(step_state_env), step_state_wrapped
)
|
1648315
|
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'../powderday/agn_models/')
from hopkins import agn_spectrum as hopkins_agn
from astropy import units as u
from astropy import constants as const
import h5py
from hyperion.model import ModelOutput
BH_modelfile = "/home/desika.narayanan/pd_git/powderday/agn_models/clumpy_models_201410_tvavg.hdf5"
nenkova_params = [5,30,0,1.5,30,40] #Nenkova+ (2008) model parameters
class Nenkova2008:
def __init__(self, N0=5, Y=30, i=0, q=1.5, sig=30, tv=40):
self.N0 = N0
self.Y = Y
self.i = i
self.q = q
self.sig = sig
self.tv = tv
try:
self.h = h5py.File(BH_modelfile, 'r')
except IOError:
            raise IOError('Unable to find Nenkova BH model file. '
                          'Check the path in parameters master, or '
                          'download the file here: '
                          'https://www.clumpy.org/downloads/'
                          'clumpy_models_201410_tvavg.hdf5')
self.check_params()
def nenkova_agn_spectrum(log_L_bol):
h = h5py.File(BH_modelfile, 'r')
nu_vec = 3e14 / h['wave'][:]
nu_vec = np.log10(nu_vec)
nu_vec = np.concatenate((nu_vec, [-1, -2, -3, -4]))
l_band_vec_torus = h['flux_tor'][:][1][0]
agn_nu, agn_l_band_vec = hopkins_agn(log_L_bol)
l_band_vec = np.log10(l_band_vec_torus) + np.interp(
nu_vec[:-4], agn_nu[:-4], agn_l_band_vec[:-4])
l_band_vec = np.concatenate((l_band_vec, [0, 0, 0, 0]))
return nu_vec, l_band_vec
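# Note on the conversion above: the model wavelength grid is in microns, so
# nu = c/lambda uses c ~ 3e14 micron/s before taking log10 (e.g. a 1-micron
# bin maps to 3e14 Hz, i.e. log10 nu ~ 14.48); the four appended sentinel
# values keep the downstream [:-4] slicing consistent.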
#nenkova
fig = plt.figure()
ax = fig.add_subplot(111)
log_lum = np.linspace(9,13,100)*u.Lsun
norm = matplotlib.colors.Normalize(
vmin = np.min(log_lum.value),
vmax = np.max(log_lum.value))
c_m = matplotlib.cm.viridis_r
s_m = matplotlib.cm.ScalarMappable(cmap = c_m,norm=norm)
s_m.set_array([])
for lum in log_lum:
print('lum = %e'%lum.value)
nu,bh_fnu = nenkova_agn_spectrum(lum.value)
    # regularizing units
bh_fnu = bh_fnu[0:-4]
bh_fnu = 10.**bh_fnu * u.erg/u.s
bh_fnu = bh_fnu.to(u.Lsun)
nu = nu[0:-4]
nu = 10.**nu
nu *= u.Hz
lam = (const.c/nu).to(u.micron)
ax.loglog(lam,bh_fnu,color=s_m.to_rgba(lum.value))
ax.set_xlabel(r'Wavelength ($\mu$m)')
ax.set_ylabel(r'F$_\nu$')
cb = fig.colorbar(s_m,orientation='vertical')
cb.set_label(r'Black Hole L$_\mathrm{bol}$ (L$_\odot$)')
cb.ax.tick_params(labelsize=8)
plt.savefig('nenkova.png',dpi=300)
#hopkins
fig = plt.figure()
ax = fig.add_subplot(111)
log_lum = np.linspace(9,13,100)*u.Lsun
norm = matplotlib.colors.Normalize(
vmin = np.min(log_lum.value),
vmax = np.max(log_lum.value))
c_m = matplotlib.cm.viridis_r
s_m = matplotlib.cm.ScalarMappable(cmap = c_m,norm=norm)
s_m.set_array([])
for lum in log_lum:
print('lum = %e'%lum.value)
nu,bh_fnu = hopkins_agn(lum.value)
    # regularizing units
bh_fnu = bh_fnu[0:-4]
bh_fnu = 10.**bh_fnu * u.erg/u.s
bh_fnu = bh_fnu.to(u.Lsun)
nu = nu[0:-4]
nu = 10.**nu
nu *= u.Hz
lam = (const.c/nu).to(u.micron)
ax.loglog(lam,bh_fnu,color=s_m.to_rgba(lum.value))
#load in the bh sed from a powderday run
'''
data = np.load('/ufrc/narayanan/desika.narayanan/pd_runs/ena/bh_sed.npz')
nholes = data['luminosity'].shape[0]
for i in range(nholes):
pd_nu = data['nu']*u.Hz
pd_lam = (const.c/pd_nu).to(u.micron)
pd_fnu = (data['fnu'][i][:]*u.erg/u.s).to(u.Lsun).value
if data['luminosity'][i] > 0:
ax.plot(pd_lam.value,pd_fnu)
'''
'''
#now plot the powderday SED
run = '/ufrc/narayanan/desika.narayanan/pd_runs/ena/example.094.rtout.bhon.sed'
m = ModelOutput(run)
wav,flux = m.get_sed(inclination='all',aperture=-1)
fullrun_wav = np.asarray(wav)*u.micron
fullrun_flux = np.asarray(flux)*u.erg/u.s
fullrun_nu = (const.c/fullrun_wav).to(u.Hz)
fullrun_fnu = fullrun_flux/fullrun_nu
ax.plot(fullrun_wav.value,fullrun_fnu[0,:].value/1.e20)
'''
ax.set_xlabel(r'Wavelength ($\mu$m)')
ax.set_ylabel(r'F$_\nu$')
cb = fig.colorbar(s_m,orientation='vertical')
cb.set_label(r'Black Hole L$_\mathrm{bol}$ (L$_\odot$)')
cb.ax.tick_params(labelsize=8)
fig.savefig('hopkins.png',dpi=300)
|
1648322
|
from django.db.models import Count
from django.http import JsonResponse
from rest_framework.views import APIView
from std_bounties.models import Bounty, Token
from std_bounties.serializers import TokenSerializer
class Tokens(APIView):
    def get(self, request):
        result = []
        token_count = Bounty.objects.values(
            'token_symbol', 'token_contract', 'token_decimals').annotate(
            count=Count('token_symbol')).order_by('-count')
for bounty in token_count:
token_to_append = {}
token_to_append.update(bounty)
token_qs = Token.objects.filter(symbol=bounty['token_symbol'])
if token_qs.count() > 0:
serializer = TokenSerializer(token_qs, many=True)
token_to_append['token'] = serializer.data
else:
token_to_append['token'] = []
result.append(token_to_append)
return JsonResponse(result, safe=False)
|
1648332
|
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError
from PuzzleLib.Modules.DeconvND import DeconvND
class Deconv1D(DeconvND):
def __init__(self, inmaps, outmaps, size, stride=1, pad=0, dilation=1, postpad=0, wscale=1.0, useBias=True,
name=None, initscheme=None, empty=False, groups=1):
super().__init__(
2, inmaps, outmaps, (1, size), (1, stride), (0, pad), (1, dilation), (0, postpad), wscale, useBias,
name, initscheme, empty, groups
)
self.registerBlueprint(locals())
def optimizeForShape(self, shape, memlimit=None):
shape = shape[:2] + (1, ) + shape[2:]
super().optimizeForShape(shape, memlimit)
def updateData(self, data):
data = data.reshape(*data.shape[:2], 1, *data.shape[2:])
super().updateData(data)
self.data = self.data.reshape(*self.data.shape[:2], *self.data.shape[3:])
def updateGrad(self, grad):
grad = grad.reshape(*grad.shape[:2], 1, *grad.shape[2:])
data = self.inData
self.inData = data.reshape(*data.shape[:2], 1, *data.shape[2:])
super().updateGrad(grad)
self.inData = data
self.grad = self.grad.reshape(*self.grad.shape[:2], *self.grad.shape[3:])
def accGradParams(self, grad, scale=1.0, momentum=0.0):
grad = grad.reshape(*grad.shape[:2], 1, *grad.shape[2:])
data = self.inData
self.inData = data.reshape(*data.shape[:2], 1, *data.shape[2:])
super().accGradParams(grad, scale, momentum)
self.inData = data
def checkDataShape(self, shape):
if len(shape) != 3:
raise ModuleError("Data must be 3d tensor")
_, inmaps, _ = shape
if inmaps != self.W.shape[0]:
raise ModuleError("Data has %d maps (expected: %d)" % (inmaps, self.W.shape[0]))
def dataShapeFrom(self, shape):
batchsize, inmaps, insize = shape
_, outmaps, _, fsize = self.W.shape
_, pad = self.pad
_, postpad = self.postpad
_, dilation = self.dilation
_, stride = self.stride
outmaps *= self.groups
outsize = (insize - 1) * stride + dilation * (fsize - 1) - 2 * pad + 1 + postpad
return batchsize, outmaps, outsize
def checkGradShape(self, shape):
if len(shape) != 3:
raise ModuleError("Grad must be 3d tensor")
_, outmaps, size = shape
if outmaps != self.W.shape[1] * self.groups:
raise ModuleError("Grad has %d maps (expected: %d)" % (outmaps, self.W.shape[1] * self.groups))
        if size + 2 * self.pad[1] < self.dilation[1] * (self.W.shape[3] - 1) + 1:
            raise ModuleError(
                "Grad size is too small (got %d, expected at least %d)" %
                (size + 2 * self.pad[1], self.dilation[1] * (self.W.shape[3] - 1) + 1)
            )
def gradShapeFrom(self, shape):
batchsize, outmaps, outsize = shape
inmaps, _, _, fsize = self.W.shape
_, pad = self.pad
_, dilation = self.dilation
_, stride = self.stride
insize = (outsize + 2 * pad - dilation * (fsize - 1) - 1) // stride + 1
return batchsize, inmaps, insize
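# Shape round-trip sanity check for the two formulas above (illustrative
# values matching multiMapsWithPadsTest): with insize=2, stride=2, pad=1,
# dilation=2, fsize=2, postpad=0, dataShapeFrom gives
#   outsize = (2 - 1)*2 + 2*(2 - 1) - 2*1 + 1 + 0 = 3
# and gradShapeFrom maps it back:
#   insize = (3 + 2*1 - 2*(2 - 1) - 1) // 2 + 1 = 2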
def unittest():
if Config.backend in {Config.Backend.cuda, Config.Backend.hip}:
multiMapsWithPadsTest()
trainTest()
def multiMapsWithPadsTest():
batchsize, inmaps, size = 5, 4, 2
outmaps, fsize, stride, pad, dilation = 4, 2, 2, 1, 2
hostData = np.random.randn(batchsize, inmaps, size).astype(np.float32)
data = gpuarray.to_gpu(hostData)
deconv = Deconv1D(inmaps, outmaps, size=size, stride=stride, pad=pad, dilation=dilation, initscheme="gaussian")
deconv(data)
hostW, hostBias = deconv.W.get(), deconv.b.get()
hostOutData = np.zeros(deconv.data.shape[:2]+(deconv.data.shape[2]+2*pad, ), dtype=np.float32)
for c in range(outmaps):
hostOutData[:, c, :] = hostBias[0, c, 0, 0]
for b in range(batchsize):
for oc in range(outmaps):
for ic in range(inmaps):
for x in range(size):
for dx in range(fsize):
hostOutData[b, oc, x * stride + dx * dilation] += hostW[ic, oc, 0, dx] * hostData[b, ic, x]
assert np.allclose(hostOutData[:, :, pad:-pad], deconv.data.get())
hostGrad = np.random.randn(*deconv.data.shape).astype(np.float32)
grad = gpuarray.to_gpu(hostGrad)
deconv.backward(grad)
hostExtGrad = np.zeros(grad.shape[:2] + (grad.shape[2] + 2 * pad, ), dtype=np.float32)
hostExtGrad[:, :, pad:-pad] = hostGrad
hostGrad = hostExtGrad
hostInGrad = np.zeros(hostData.shape, dtype=np.float32)
for b in range(batchsize):
for ic in range(inmaps):
for oc in range(outmaps):
for x in range(size):
for dx in range(fsize):
hostInGrad[b, ic, x] += hostGrad[b, oc, x * stride + dx * dilation] * hostW[ic, oc, 0, dx]
assert np.allclose(hostInGrad, deconv.grad.get())
hostWGrad = np.zeros(deconv.getVar("W").grad.shape, dtype=np.float32)
for b in range(batchsize):
for ic in range(inmaps):
for oc in range(outmaps):
for dx in range(fsize):
for x in range(size):
hostWGrad[ic, oc, 0, dx] += hostGrad[b, oc, x * stride + dx * dilation] * hostData[b, ic, x]
assert np.allclose(hostWGrad, deconv.getVar("W").grad.get())
hostBGrad = np.empty(hostBias.shape, dtype=np.float32)
for oc in range(outmaps):
hostBGrad[0, oc, 0, 0] = np.sum(hostGrad[:, oc, :])
assert np.allclose(hostBGrad, deconv.getVar("b").grad.get())
def trainTest():
batchsize, inmaps, size = 5, 5, 2
outmaps = 1
fsize = 3
data = gpuarray.to_gpu(np.random.normal(0.0, 1.0, (batchsize, inmaps, size)).astype(np.float32))
deconv = Deconv1D(inmaps, outmaps, fsize)
from PuzzleLib.Cost.MSE import MSE
mse = MSE()
target = gpuarray.to_gpu(np.random.normal(0.0, 1.0, (batchsize, outmaps, 4)).astype(np.float32))
for i in range(100):
learnRate = 1e-2
deconv(data)
error, grad = mse(deconv.data, target)
deconv.backward(grad)
deconv.updateParams(learnRate)
if (i + 1) % 5 == 0:
print("Iteration #%d error: %s" % (i + 1, error))
if __name__ == "__main__":
unittest()
|
1648345
|
from termpixels.pixeldata import PixelData
from time import perf_counter
# Boxes
_BOX_T = 0
_BOX_B = 1
_BOX_L = 2
_BOX_R = 3
_BOX_TL = 4
_BOX_TR = 5
_BOX_BL = 6
_BOX_BR = 7
BOX_CHARS_ASCII = "--||++++"
BOX_CHARS_LIGHT = "──││┌┐└┘"
BOX_CHARS_LIGHT_ARC = "──││╭╮╰╯"
BOX_CHARS_HEAVY = "━━┃┃┏┓┗┛"
BOX_CHARS_DOUBLE = "══║║╔╗╚╝"
BOX_CHARS_LIGHT_DOUBLE_TOP = "═─││╒╕└┘"
# Frames
# logical indices for strings of frame characters
_FRAME_NO = -1 # not a frame character
_FRAME_H = 0 # horizontal index
_FRAME_V = 1 # vertical
_FRAME_TL = 2 # top left corner
_FRAME_TR = 3 # top right corner
_FRAME_BL = 4 # bottom left corner
_FRAME_BR = 5 # bottom right corner
_FRAME_VR = 6 # vertical and right
_FRAME_VL = 7 # vertical and left
_FRAME_HB = 8 # horizontal and bottom
_FRAME_HT = 9 # horizontal and top
_FRAME_VH = 10 # vertical and horizontal
_FRAME_L = 11 # left
_FRAME_T = 12 # top
_FRAME_R = 13 # right
_FRAME_B = 14 # bottom
# bitmask representing left, top, right, bottom
# each bit represents whether a character "points" or "flows" in that direction.
# for example, a top left corner (_FRAME_TL, e.g. "┌") points right and down.
_FRAME_GEOMETRY = {
_FRAME_NO: 0b0000,
_FRAME_L: 0b1000,
_FRAME_T: 0b0100,
_FRAME_R: 0b0010,
_FRAME_B: 0b0001,
_FRAME_H: 0b1010,
_FRAME_V: 0b0101,
_FRAME_TL: 0b0011,
_FRAME_TR: 0b1001,
_FRAME_BL: 0b0110,
_FRAME_BR: 0b1100,
_FRAME_VR: 0b0111,
_FRAME_VL: 0b1101,
_FRAME_HB: 0b1011,
_FRAME_HT: 0b1110,
_FRAME_VH: 0b1111
}
# inverse mapping of _FRAME_GEOMETRY
_GEOMETRY_FRAME = {geom: char for char, geom in _FRAME_GEOMETRY.items()}
FRAME_CHARS_LIGHT = "─│┌┐└┘├┤┬┴┼╴╵╶╷"
FRAME_CHARS_LIGHT_LONG = "─│┌┐└┘├┤┬┴┼─│─│"
FRAME_CHARS_HEAVY = "━┃┏┓┗┛┣┫┳┻╋╸╹╺╻"
FRAME_CHARS_HEAVY_LONG = "━┃┏┓┗┛┣┫┳┻╋━┃━┃"
FRAME_CHARS_DOUBLE = "═║╔╗╚╝╠╣╦╩╬═║═║"
# Spinners
SPINNER_SIX = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
SPINNER_PIPE = "-\\|/"
SPINNER_BAR = ["[ ]",
"[= ]",
"[== ]",
"[=== ]",
"[ ===]",
"[ ==]",
"[ =]",
"[ ]",
"[ =]",
"[ ==]",
"[ ===]",
"[=== ]",
"[== ]",
"[= ]"]
SPINNER_DOTS = [" ",
". ",
".. ",
"...",
" ..",
" ."]
SPINNER_MOON = "🌑🌒🌓🌔🌕🌖🌗🌘"
SPINNER_CLOCK = "🕛🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚"
SPINNER_BOX = "▖▘▝▗"
PROGRESS_SMOOTH = {
"start": "",
"end": "",
"bar_char": "█",
"head_chars": " ▏▎▍▌▋▊"
}
def draw_hline(buffer, y, char="─", **kwargs):
"""Draw a horizontal line along the given y coordinate.
"""
for x in range(buffer.w):
buffer.print(char, x, y, **kwargs)
def draw_vline(buffer, x, char="│", **kwargs):
"""Draw a vertical line along the given x coordinate.
"""
for y in range(buffer.h):
buffer.print(char, x, y, **kwargs)
def draw_box(buffer, x, y, w, h, chars=BOX_CHARS_LIGHT, **kwargs):
"""Draw a box using a set of box-drawing characters.
w and h must be positive (greater than zero) integers, or nothing is drawn.
If either dimension is 1, a line is drawn instead of a box.
If both dimensions are 1, nothing is drawn.
"""
if w < 1 or h < 1:
return
if h > 1:
for px, char_id in ((max(0, x + w - 1), _BOX_R), (x, _BOX_L)):
for py in range(y, y + h):
buffer.print(chars[char_id], px, py, **kwargs)
if w > 1:
for py, char_id in ((max(0, y + h - 1), _BOX_B), (y, _BOX_T)):
for px in range(x, x + w):
buffer.print(chars[char_id], px, py, **kwargs)
if w > 1 and h > 1:
buffer.print(chars[_BOX_TL], x, y, **kwargs)
buffer.print(chars[_BOX_TR], x + w - 1, y, **kwargs)
buffer.print(chars[_BOX_BL], x, y + h -1, **kwargs)
buffer.print(chars[_BOX_BR], x + w - 1, y + h - 1, **kwargs)
def _invert_geometry(geom):
"""Invert (rotate 180 degrees) a 4-bit frame geometry bitmask."""
return (geom >> 2) | ((geom << 2) & 0b1100)
def _frame_connecting_char(buffer, char, x, y, chars):
"""Connect a frame character to neighbors and return the resuling character index.
For example, imagine we are drawing a horizontal line "─" on top of a corner "└".
We want to produce the combined character "┴".
"""
assert char != _FRAME_NO
def buffer_char(x, y):
if not buffer.in_bounds(x, y):
return -1
char = buffer[x, y].char
try:
return chars.index(char)
except ValueError:
return -1
geom = _FRAME_GEOMETRY[char]
geom |= _invert_geometry(_FRAME_GEOMETRY[buffer_char(x - 1, y)] & 0b0010)
geom |= _invert_geometry(_FRAME_GEOMETRY[buffer_char(x, y - 1)] & 0b0001)
geom |= _invert_geometry(_FRAME_GEOMETRY[buffer_char(x + 1, y)] & 0b1000)
geom |= _invert_geometry(_FRAME_GEOMETRY[buffer_char(x, y + 1)] & 0b0100)
    assert geom != _FRAME_GEOMETRY[_FRAME_NO]
return _GEOMETRY_FRAME[geom]
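# Worked example of the bitmask algebra described in the docstring above:
# a horizontal line "─" (_FRAME_H, geometry 0b1010) combined with a corner
# "└" (_FRAME_BL, 0b0110) ORs to 0b1110, which _GEOMETRY_FRAME maps back to
# _FRAME_HT, i.e. "┴" in FRAME_CHARS_LIGHT:
#   merged = _FRAME_GEOMETRY[_FRAME_H] | _FRAME_GEOMETRY[_FRAME_BL]  # 0b1110
#   FRAME_CHARS_LIGHT[_GEOMETRY_FRAME[merged]]  # -> "┴"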
def draw_frame(buffer, x, y, w, h, chars=FRAME_CHARS_LIGHT, **kwargs):
"""Draw a box, connecting it where it overlaps with an existing box."""
frame_pixels = []
def pixel(char_idx, x, y):
char = chars[_frame_connecting_char(buffer, char_idx, x, y, chars)]
frame_pixels.append((char, x, y))
if w < 1 or h < 1:
return
    if h > 1:
        for px in (max(0, x + w - 1), x):
            for py in range(y, y + h):
                pixel(_FRAME_V, px, py)
            pixel(_FRAME_B, px, y)
            pixel(_FRAME_T, px, y + h - 1)
    if w > 1:
        for py in (max(0, y + h - 1), y):
            for px in range(x, x + w):
                pixel(_FRAME_H, px, py)
            pixel(_FRAME_R, x, py)
            pixel(_FRAME_L, x + w - 1, py)
if w > 1 and h > 1:
pixel(_FRAME_TL, x, y)
pixel(_FRAME_TR, x + w - 1, y)
pixel(_FRAME_BL, x, y + h -1)
pixel(_FRAME_BR, x + w - 1, y + h - 1)
for char, x, y in frame_pixels:
buffer.put_char(char, x, y, **kwargs)
def draw_spinner(buffer, x, y, *, freq=1, t=None, frames=SPINNER_SIX, **kwargs):
"""Print a repeating animation.
Given a list of frames, selects a frame based on the given time (defaults to
the current time) and frequency (defaults to 1hz).
Typically called within a "frame" event listener to update the animation.
"""
if t is None:
t = perf_counter()
period = 1 / freq
f = (t % period) / period
frame = int(f * len(frames))
buffer.print(frames[frame], x, y, **kwargs)
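# Example (assuming `buffer` is a termpixels screen buffer), typically called
# once per "frame" event:
#   draw_spinner(buffer, 0, 0, freq=2, frames=SPINNER_MOON)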
def draw_progress(buffer, x, y, *, w, progress, start="[", end="]", bar_char="=", empty_char=" ", head_chars=">", fg=None, bg=None):
"""Draw a horizontal, left-to-right progress bar.
w -- the total width of the progress bar, including start and end strings
progress -- the progress in the range [0, 1]
start -- a string to display at the left of the progress bar
end -- a string to display at the right of the progress bar
bar_char -- a character with which to fill the completed portion of the bar
empty_char -- a character with which to fill the remaining portion of the bar
head_chars -- a sequence of characters to display at the "head" of the bar,
where the sequence is indexed based on what fraction of the
head character should be filled.
fg -- foreground color (default None to preserve)
bg -- background color (default None to preserve)
"""
if len(bar_char) != 1:
raise ValueError("bar_char must have length 1")
if len(empty_char) != 1:
raise ValueError("empty_char must have length 1")
bar_space = w - len(start) - len(end)
if bar_space < 0:
return
progress = min(1, max(0, progress))
bar_filled_chars = int(progress * bar_space)
buffer.fill(x, y, w, 1, fg=fg, bg=bg)
buffer.print(start, x, y)
buffer.print(bar_char * bar_filled_chars)
if bar_filled_chars < bar_space:
f = progress * bar_space - bar_filled_chars
head_char = head_chars[int(f * len(head_chars))]
buffer.print(head_char)
buffer.print(empty_char * (bar_space - bar_filled_chars - 1))
buffer.print(end)
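# Example (assuming `buffer` is a termpixels screen buffer): a 20-cell bar at
# 42%, rendered with the smooth block style defined above.
#   draw_progress(buffer, 0, 0, w=20, progress=0.42, **PROGRESS_SMOOTH)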
def draw_colormap(buffer, colormap, x, y, *, w, h, char="█"):
"""Draw a color bitmap where each color is represented as one character cell.
colormap is a one-dimensional list of colors representing a 2D bitmap. Each
row of colors should be listed in sequence. The indexing formula is y*w+x.
colormap may contain Nones, indicating transparent pixels. Drawing a
transparent pixel will preserve the cell's contents.
Provide x and y coordinate for top-left of the bitmap in the destination.
Provide the width and height of the colormap.
"""
for dx in range(w):
for dy in range(h):
idx = dy * w + dx
if colormap[idx] is None:
continue
px = x + dx
py = y + dy
if not buffer.in_bounds(px, py):
continue
pixel = buffer[px, py]
pixel.char = char
pixel.fg = colormap[idx]
def draw_colormap_2x(buffer, colormap, x, y, *, w, h, char="▀"):
"""Draw a color bitmap at 2x vertical resolution using a box drawing character.
Creates two sub-pixels per terminal character (super-pixel) by using a box
drawing character and setting both foreground and background colors.
colormap is a one-dimensional list of colors representing a 2D bitmap. Each
row of colors should be listed in sequence. The indexing formula is y*w+x.
colormap may contain Nones, indicating transparent sub-pixels. Drawing a
super-pixel that is completely transparent will preserve its contents.
Drawing a super-pixel with one transparent sub-pixel will cause the
super-pixel's background color to show through, but will destroy its
contents.
Provide x and y coordinate for top-left of destination in super-pixel
coordinates.
The y coordinate may be fractional and will be rounded to the nearest
sub-pixel.
Provide width and height of colormap (in sub-pixel size). When drawn, the
vertical dimension of the colormap will be reduced by half.
"""
for dx in range(w):
for dy in range(h):
idx = dy * w + dx
if colormap[idx] is None:
continue
px = x + dx
fy = y + dy * 0.5
py = int(fy)
if not buffer.in_bounds(px, py):
continue
pixel = buffer[px, py]
if fy % 1 < 0.5:
pixel.char = char
pixel.fg = colormap[idx]
else:
if pixel.char != char:
pixel.char = char
pixel.fg = pixel.bg
pixel.bg = colormap[idx]
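# Example (color values are hypothetical RGB tuples): a 2x2 colormap occupies
# a single character row, since each cell packs two vertical sub-pixels.
#   red, blue = (255, 0, 0), (0, 0, 255)
#   draw_colormap_2x(buffer, [red, blue, blue, red], 0, 0, w=2, h=2)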
|
1648352
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.PFTau.PFClient_cfi import pfClient, pfClientJetRes
#from DQMOffline.PFTau.PFClient_cfi import *
pfJetClient = pfClient.clone(
FolderNames = ['PFJetValidation/CompWithGenJet'],
HistogramNames = ['delta_et_Over_et_VS_et_'],
CreateProfilePlots = True,
HistogramNamesForProfilePlots = ['delta_et_Over_et_VS_et_','delta_et_VS_et_','delta_eta_VS_et_','delta_phi_VS_et_']
)
pfMETClient = pfClient.clone(
FolderNames = ['PFMETValidation/CompWithGenMET'],
HistogramNames = ['delta_et_Over_et_VS_et_'],
CreateProfilePlots = True,
HistogramNamesForProfilePlots = ['delta_et_Over_et_VS_et_','delta_et_VS_et_','delta_eta_VS_et_','delta_phi_VS_et_']
)
pfJetResClient = pfClientJetRes.clone(
FolderNames = ['PFJetResValidation/JetPtRes'],
HistogramNames = ['delta_et_Over_et_VS_et_', 'BRdelta_et_Over_et_VS_et_', 'ERdelta_et_Over_et_VS_et_'],
CreateEfficiencyPlots = False,
HistogramNamesForEfficiencyPlots = ['pt_', 'eta_', 'phi_']
)
pfElectronClient = pfClient.clone(
FolderNames = ['PFElectronValidation/CompWithGenElectron'],
HistogramNames = [''],
CreateEfficiencyPlots = True,
HistogramNamesForEfficiencyPlots = ['pt_', 'eta_', 'phi_'],
HistogramNamesForProjectionPlots = ['delta_et_Over_et_VS_et_','delta_et_VS_et_','delta_eta_VS_et_','delta_phi_VS_et_']
)
|
1648382
|
from tests import OkTestCase
from server.models import db, Assignment, Backup, Message, User
from server import utils
class TestDownload(OkTestCase):
def _add_file(self, filename, contents):
self.setup_course()
email = '<EMAIL>'
self.login(email)
self.user = User.lookup(email)
self.backup = Backup(
submitter=self.user,
assignment=self.assignment,
submit=True)
self.message = Message(
backup=self.backup,
contents={
filename: contents,
'submit': True
},
kind='file_contents')
db.session.add(self.backup)
db.session.add(self.message)
db.session.commit()
def test_simple(self):
filename = "test.py"
contents = "x = 4"
self._add_file(filename, contents)
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, encoded_id, filename)
response = self.client.get(url)
self.assert_200(response)
self.assertTrue('attachment' in response.headers['Content-Disposition'])
self.assertEqual(response.headers['Content-Type'], 'text/plain; charset=UTF-8')
self.assertEqual(response.headers['X-Content-Type-Options'], 'nosniff')
self.assertEqual(contents, response.data.decode('UTF-8'))
def test_raw(self):
filename = "test.py"
contents = "x = 4"
self._add_file(filename, contents)
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}?raw=1".format(self.assignment.name, submit_str, encoded_id, filename)
response = self.client.get(url)
self.assert_200(response)
self.assertTrue('inline' in response.headers['Content-Disposition'])
self.assertEqual(response.headers['Content-Type'], 'text/plain; charset=UTF-8')
self.assertEqual(response.headers['X-Content-Type-Options'], 'nosniff')
self.assertEqual(contents, response.data.decode('UTF-8'))
def test_incorrect_hash(self):
filename = "test.py"
contents = "x = 4"
self._add_file(filename, contents)
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, "xxxxx", filename)
response = self.client.get(url)
self.assert_404(response)
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, "123", filename)
response = self.client.get(url)
self.assert_404(response)
def test_incorrect_submit_boolean(self):
filename = "test.py"
contents = "x = 4"
self._add_file(filename, contents)
encoded_id = utils.encode_id(self.backup.id)
wrong_submit_str = "backups" if self.backup.submit else "submissions" # intentionally flipped
correct_submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}"
wrong_url = url.format(self.assignment.name, wrong_submit_str, encoded_id, filename)
redir_url = url.format(self.assignment.name, correct_submit_str, encoded_id, filename)
response = self.client.get(wrong_url)
self.assertRedirects(response, redir_url)
response = self.client.get(redir_url)
self.assertEqual(contents, response.data.decode('UTF-8'))
def test_unicode(self):
filename = "test.py"
contents = "⚡️ 🔥 💥 ❄️"
self._add_file(filename, contents)
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, encoded_id, filename)
response = self.client.get(url)
self.assert_200(response)
self.assertEqual(contents, response.data.decode('UTF-8'))
def test_folders(self):
filename = "tests/hof.py"
contents = "tests = {\nstatus: 'locked'\n}"
self._add_file(filename, contents)
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, encoded_id, filename)
response = self.client.get(url)
self.assert_200(response)
self.assertEqual(contents, response.data.decode('UTF-8'))
def test_wrong_student(self):
filename = "test.py"
contents = "x = 4"
self._add_file(filename, contents)
self.login('<EMAIL>')
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, encoded_id, filename)
response = self.client.get(url)
self.assert_404(response)
def test_staff(self):
filename = "test.py"
contents = "x = 4"
self._add_file(filename, contents)
self.login(self.staff1.email)
encoded_id = utils.encode_id(self.backup.id)
submit_str = "submissions" if self.backup.submit else "backups"
url = "/{0}/{1}/{2}/download/{3}".format(self.assignment.name, submit_str, encoded_id, filename)
response = self.client.get(url)
self.assert_200(response)
|
1648395
|
import json
from ..Ad.sub.AdFieldObject import AdFieldObject
class AdObject:
def __init__(self, json_def):
if type(json_def) is str:
json_def = json.loads(json_def)
s = json_def
        self.ad = AdFieldObject(s['ad']) if 'ad' in s else None
        self.adattr = s.get('adattr')
        self.customerId = s.get('customerId')
        self.editTm = s.get('editTm')
        self.inspectRequestMsg = s.get('inspectRequestMsg')
        self.inspectStatus = s.get('inspectStatus')
        self.nccAdId = s.get('nccAdId')
        self.nccAdgroupId = s.get('nccAdgroupId')
        self.regTm = s.get('regTm')
        self.status = s.get('status')
        self.statusReason = s.get('statusReason')
        self.type = s.get('type')
        self.userLock = s.get('userLock')
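# Usage sketch (field values are hypothetical):
#   ad = AdObject('{"nccAdId": "ad-123", "status": "ELIGIBLE"}')
#   ad.nccAdId  # -> 'ad-123'
#   ad.editTm   # -> None (absent keys default to None)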
|
1648430
|
import os
import glob
import time
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c, temp_f
while True:
print(read_temp())
time.sleep(1)
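# The w1_slave file read above contains two lines, e.g.:
#   73 01 4b 46 7f ff 0d 10 41 : crc=41 YES
#   73 01 4b 46 7f ff 0d 10 41 t=23187
# read_temp() waits until the CRC check reports YES, then parses the
# millidegree value after "t=" (23187 -> 23.187 C / 73.737 F).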
|
1648439
|
import unittest
import copy
import torch
import os
import shutil
import csv
import tempfile
import trojai.modelgen.config as tpmc
import trojai.modelgen.architecture_factory as tpmaf
import trojai.modelgen.data_manager as tpmd
import trojai.modelgen.architectures.mnist_architectures as tpma
import trojai.modelgen.default_optimizer as tpmo
"""
Test custom __deepcopy__ implementations
"""
class TestCopyImplementations(unittest.TestCase):
def setUp(self) -> None:
self.tmp_file = tempfile.TemporaryDirectory()
self.experiment_path = self.tmp_file.name
self.train_file = os.path.join(self.experiment_path, "train.csv")
self.clean_test_file = os.path.join(self.experiment_path, "test.csv")
self.triggered_file = os.path.join(self.experiment_path, "triggered.csv")
self.model_save_dir = os.path.join(self.experiment_path, "model_save_dir")
self.stats_save_dir = os.path.join(self.experiment_path, "stats_save_dir")
# make dummy CSV files to pass validation()
dummy_csv_files = [self.train_file, self.clean_test_file, self.triggered_file]
for ff in dummy_csv_files:
dummy_dict = {"a": "1", "b": "2", "c": "3"}
with open(ff, "w") as f:
writer = csv.writer(f)
for i in dummy_dict:
writer.writerow([i, dummy_dict[i]])
f.close()
def tearDown(self) -> None:
self.tmp_file.cleanup()
def test_training_config_copy1(self):
t1 = tpmc.TrainingConfig()
t2 = copy.deepcopy(t1)
self.assertEqual(t1, t2)
def test_training_config_copy2(self):
t1 = tpmc.TrainingConfig(torch.device("cpu"))
t2 = copy.deepcopy(t1)
self.assertEqual(t1, t2)
def test_reporting_config_copy(self):
r1 = tpmc.ReportingConfig()
r2 = copy.deepcopy(r1)
self.assertEqual(r1, r2)
    def test_default_optimizer_config_copy(self):
        o1 = tpmc.DefaultOptimizerConfig()
        o2 = copy.deepcopy(o1)
        self.assertEqual(o1, o2)
def test_model_generator_config_copy(self):
class MyArchFactory(tpmaf.ArchitectureFactory):
def new_architecture(self):
return tpma.ModdedLeNet5Net(channels=1)
arch = MyArchFactory()
# setup the xforms to ensure we can test the callables
def data_xform(x): return x*x
def label_xform(y): return y*y*y
data = tpmd.DataManager(self.experiment_path, self.train_file, self.clean_test_file,
triggered_test_file=self.triggered_file,
train_data_transform=data_xform,
train_label_transform=label_xform,
test_data_transform=data_xform,
test_label_transform=label_xform,
file_loader='image',
shuffle_train=True,
shuffle_clean_test=False,
shuffle_triggered_test=False)
num_models = 1
mgc1 = tpmc.ModelGeneratorConfig(arch, data, self.model_save_dir, self.stats_save_dir, num_models)
mgc2 = copy.deepcopy(mgc1)
self.assertEqual(mgc1, mgc2)
    def test_default_optimizer_copy(self):
        opt1 = tpmo.DefaultOptimizer()
        opt2 = copy.deepcopy(opt1)
        self.assertEqual(opt1, opt2)
def test_data_manager_copy(self):
def train_data_xform(x): return x*x
def train_label_xform(y): return y*y*y
def test_data_xform(x): return x**2
def test_label_xform(y): return y + 2
dat1 = tpmd.DataManager(self.experiment_path, self.train_file, self.clean_test_file,
triggered_test_file=self.triggered_file,
train_data_transform=train_data_xform,
train_label_transform=train_label_xform,
test_data_transform=test_data_xform,
test_label_transform=test_label_xform,
file_loader='image',
shuffle_train=True,
shuffle_clean_test=False,
shuffle_triggered_test=False)
dat2 = copy.deepcopy(dat1)
self.assertEqual(dat1, dat2)
self.assertEqual(train_data_xform, dat1.train_data_transform)
self.assertEqual(train_data_xform, dat2.train_data_transform)
self.assertEqual(train_label_xform, dat1.train_label_transform)
self.assertEqual(train_label_xform, dat2.train_label_transform)
self.assertEqual(test_data_xform, dat1.test_data_transform)
self.assertEqual(test_data_xform, dat2.test_data_transform)
self.assertEqual(test_label_xform, dat1.test_label_transform)
self.assertEqual(test_label_xform, dat2.test_label_transform)
if __name__ == '__main__':
unittest.main()
|
1648465
|
import unittest
from unittest.mock import MagicMock
from git_gopher.Fzf import Fzf
from git_gopher.CommandRunner import CommandRunner
from git_gopher.GitDataGetter import GitDataGetter
from git_gopher.HistoryCommandRunner import HistoryCommandRunner
from git_gopher.StashMessage import StashMessage
class TestStashMessage(unittest.TestCase):
def test_run(self):
message = 'foo'
command_runner = CommandRunner()
git_data_getter = GitDataGetter(Fzf(), command_runner)
git_data_getter.get_stash_message_from_input = MagicMock(return_value=message)
        hist_command_runner = HistoryCommandRunner(git_data_getter, command_runner)
        hist_command_runner.run = MagicMock()
        stash_message = StashMessage(hist_command_runner, git_data_getter)
        stash_message.run()
        hist_command_runner.run.assert_called_once_with(['git', 'stash', 'push', '-m', message])
if __name__ == '__main__':
unittest.main()
|
1648493
|
from submission import Submission
class SilvestreSubmission(Submission):
def run(self, s):
"""
:param s: input in string format
:return: solution in integer format
"""
result = 0
char_list = list(s)
char_list.append(char_list[0])
for i, char in enumerate(char_list[:-1]):
if char == char_list[i+1]:
result += int(char)
return result
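# Worked example: run("1122") compares the circular digit pairs (1,1), (1,2),
# (2,2), (2,1) and adds each digit that matches its successor: 1 + 2 = 3.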
|
1648497
|
import unittest
class CameraSimulatorTest(unittest.TestCase):
def test_get_camera_info_msg(self):
pass
|
1648558
|
import glob
from os.path import split
import os
rel = "" if os.getcwd().endswith("scripts") else "./scripts/"
HEADER_PATH = rel + 'README_HEADER.md'
EXAMPLES_PATH = rel + "../examples"
README_DESTINATION = rel + "../README.md"
TEST_FILE_PATH = rel + "../pytago/tests/test_core.py"
DISABLED_EXAMPLES = {
"iterunpacking",
"dunders",
"pop",
"ingenerator"
}
def get_usage_string():
import subprocess
out = subprocess.check_output(["pytago", "-h"])
return '\n'.join(out.decode().splitlines()).replace("Pytago", "pytago", 1)
def main():
parts = []
with open(HEADER_PATH) as f:
TEMPLATE = f.read()
with open(TEST_FILE_PATH) as f:
TEST_CODE = f.read()
def example_sort_key(a):
ext = a.split('.')[-1]
example_name = split(a)[-1].removesuffix(".py").removesuffix(".go")
try:
return TEST_CODE.index(example_name), 0 if ext == "py" else 1
except ValueError:
return -1, 0 if ext == "py" else 1
examples = glob.glob(EXAMPLES_PATH + "/*")
examples.sort(key=example_sort_key)
for i, example in enumerate(examples):
example_name = split(example)[-1].removesuffix(".py").removesuffix(".go")
if example_name in DISABLED_EXAMPLES:
continue
is_python = i % 2 == 0
if is_python:
assert example.endswith(".py")
parts.append(f'### {example_name}')
else:
assert example.endswith(".go")
with open(example) as f:
if is_python:
parts.append("""#### Python""")
else:
parts.append("""#### Go""")
parts.append('```' + ("python" if is_python else "go"))
for line in f.read().splitlines(keepends=False):
if is_python and line.strip().startswith('#'):
continue
parts.append(line)
parts.append("```")
example_code = '\n'.join(parts)
with open(README_DESTINATION, "w") as f:
t = TEMPLATE.replace('{% usage %}', get_usage_string())
t = t.replace('{% examples %}', example_code)
f.write(t)
if __name__ == '__main__':
main()
|
1648616
|
import sys
sys.path.append("..")
import os
import math
import torch
import torch.nn as nn
import torchvision
import model.E.E_Blur as BE
import model.E.E_PG as BE_PG
import model.E.E_BIG as BE_BIG
from model.utils.custom_adam import LREQAdam
import lpips
from metric.grad_cam import GradCAM, GradCamPlusPlus, GuidedBackPropagation, mask2cam
import tensorboardX
import numpy as np
import argparse
from model.stylegan1.net import Generator, Mapping #StyleGANv1
import model.stylegan2_generator as model_v2 #StyleGANv2
import model.pggan.pggan_generator as model_pggan #PGGAN
from model.biggan_generator import BigGAN #BigGAN
from model.utils.biggan_config import BigGANConfig
from training_utils import *
#torch.backends.cudnn.enabled = True
#torch.backends.cudnn.benchmark = True
#torch.backends.cudnn.deterministic = False # faster
def train(tensor_writer = None, args = None):
type = args.mtype
model_path = args.checkpoint_dir_GAN
if type == 1: # StyleGAN1
Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)
Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth'))
Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) #num_layers: 14->256 / 16->512 / 18->1024
Gm.load_state_dict(torch.load(model_path+'/Gm_dict.pth'))
Gm.buffer1 = torch.load(model_path+'/center_tensor.pt')
const_ = Gs.const
const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda()
layer_num = int(math.log(args.img_size,2)-1)*2 # 14->256 / 16 -> 512 / 18->1024
        layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] # shape:[1,18,1], layer_idx = [0,1,2,...,17]
ones = torch.ones(layer_idx.shape, dtype=torch.float32) # shape:[1,18,1], ones = [1,1,1,1,1,1,1,1]
        coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones) # truncation_psi: scale the first 8 of the 18 layers by 0.7 -> [0.7,0.7,...,1,1]
Gs.cuda()
Gm.eval()
E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)
elif type == 2: # StyleGAN2
generator = model_v2.StyleGAN2Generator(resolution=args.img_size).to(device)
checkpoint = torch.load(model_path) #map_location='cpu'
if 'generator_smooth' in checkpoint: #default
generator.load_state_dict(checkpoint['generator_smooth'])
else:
generator.load_state_dict(checkpoint['generator'])
synthesis_kwargs = dict(trunc_psi=0.7,trunc_layers=8,randomize_noise=False)
#Gs = generator.synthesis
#Gm = generator.mapping
const_r = torch.randn(args.batch_size)
const1 = generator.synthesis.early_layer(const_r).detach().clone() #[n,512,4,4]
#E = BE.BE(startf=64, maxf=512, layer_count=7, latent_size=512, channels=3) # 256
E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3) # layer_count: 7->256 8->512 9->1024
elif type == 3: # PGGAN
generator = model_pggan.PGGANGenerator(resolution=args.img_size).to(device)
checkpoint = torch.load(model_path) #map_location='cpu'
        if 'generator_smooth' in checkpoint: # default branch
generator.load_state_dict(checkpoint['generator_smooth'])
else:
generator.load_state_dict(checkpoint['generator'])
const1 = torch.tensor(0)
E = BE_PG.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3, pggan=True)
elif type == 4:
config = BigGANConfig.from_json_file(args.config_dir)
generator = BigGAN(config).to(device)
generator.load_state_dict(torch.load(model_path))
E = BE_BIG.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3, biggan=True)
else:
print('error')
return
    if args.checkpoint_dir_E is not None:
E.load_state_dict(torch.load(args.checkpoint_dir_E))
E.cuda()
writer = tensor_writer
E_optimizer = LREQAdam([{'params': E.parameters()},], lr=0.0015, betas=(0.0, 0.99), weight_decay=0)
    # using this Adam variant avoids: RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
loss_lpips = lpips.LPIPS(net='vgg').to('cuda')
batch_size = args.batch_size
it_d = 0
#vgg16->Grad-CAM
vgg16 = torchvision.models.vgg16(pretrained=True).cuda()
final_layer = None
for name, m in vgg16.named_modules():
if isinstance(m, nn.Conv2d):
final_layer = name
grad_cam_plus_plus = GradCamPlusPlus(vgg16, final_layer)
gbp = GuidedBackPropagation(vgg16)
for iteration in range(0,args.iterations):
set_seed(iteration%30000)
z = torch.randn(batch_size, args.z_dim) #[32, 512]
if type == 1:
            with torch.no_grad(): # generate the images and latent variables here
w1 = Gm(z,coefs_m=coefs).cuda() #[batch_size,18,512]
imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) # 7->512 / 6->256
elif type == 2:
with torch.no_grad():
result_all = generator(z.cuda(), **synthesis_kwargs)
imgs1 = result_all['image']
w1 = result_all['wp']
elif type == 3:
            with torch.no_grad(): # generate the images and latent variables here
w1 = z.cuda()
result_all = generator(w1)
imgs1 = result_all['image']
elif type == 4:
z = truncated_noise_sample(truncation=0.4, batch_size=batch_size, seed=iteration%30000)
            #label = np.random.randint(1000,size=batch_size) # generate labels
flag = np.random.randint(1000)
label = np.ones(batch_size)
label = flag * label
label = one_hot(label)
w1 = torch.tensor(z, dtype=torch.float).cuda()
conditions = torch.tensor(label, dtype=torch.float).cuda() # as label
truncation = torch.tensor(0.4, dtype=torch.float).cuda()
            with torch.no_grad(): # generate the images and latent variables here
imgs1, const1 = generator(w1, conditions, truncation) # const1 are conditional vectors in BigGAN
if type != 4:
const2,w2 = E(imgs1)
else:
const2,w2 = E(imgs1, const1)
if type == 1:
imgs2=Gs.forward(w2,int(math.log(args.img_size,2)-2))
elif type == 2 or type == 3:
imgs2=generator.synthesis(w2)['image']
elif type == 4:
imgs2, _ = generator(w2, conditions, truncation)
else:
print('model type error')
return
E_optimizer.zero_grad()
#Image Vectors
mask_1 = grad_cam_plus_plus(imgs1,None) #[c,1,h,w]
mask_2 = grad_cam_plus_plus(imgs2,None)
# imgs1.retain_grad()
# imgs2.retain_grad()
imgs1_ = imgs1.detach().clone()
imgs1_.requires_grad = True
imgs2_ = imgs2.detach().clone()
imgs2_.requires_grad = True
grad_1 = gbp(imgs1_) # [n,c,h,w]
grad_2 = gbp(imgs2_)
heatmap_1,cam_1 = mask2cam(mask_1,imgs1)
heatmap_2,cam_2 = mask2cam(mask_2,imgs2)
loss_grad, loss_grad_info = space_loss(grad_1,grad_2,lpips_model=loss_lpips)
##--Image
loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips)
E_optimizer.zero_grad()
loss_imgs.backward(retain_graph=True)
E_optimizer.step()
##--Mask_Cam as AT1 (HeatMap from Mask)
mask_1 = mask_1.float().to(device)
mask_1.requires_grad=True
mask_2 = mask_2.float().to(device)
mask_2.requires_grad=True
loss_mask, loss_mask_info = space_loss(mask_1,mask_2,lpips_model=loss_lpips)
loss_mask = loss_mask * 5.0
E_optimizer.zero_grad()
loss_mask.backward(retain_graph=True)
E_optimizer.step()
##--Grad_CAM as AT2 (from mask with img)
cam_1 = cam_1.float().to(device)
cam_1.requires_grad=True
cam_2 = cam_2.float().to(device)
cam_2.requires_grad=True
loss_Gcam, loss_Gcam_info = space_loss(cam_1,cam_2,lpips_model=loss_lpips)
loss_Gcam = loss_Gcam * 9.0
E_optimizer.zero_grad()
loss_Gcam.backward(retain_graph=True)
E_optimizer.step()
#Latent Vectors
##--C
loss_c, loss_c_info = space_loss(const1,const2,image_space = False)
##--W
loss_w, loss_w_info = space_loss(w1,w2,image_space = False)
E_optimizer.zero_grad()
loss_w.backward()
E_optimizer.step()
print('ep_%d_iter_%d'%(iteration//30000,iteration%30000))
        print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]')
print('---------ImageSpace--------')
print('loss_mask_info: %s'%loss_mask_info)
print('loss_grad_info: %s'%loss_grad_info)
print('loss_imgs_info: %s'%loss_imgs_info)
print('loss_Gcam_info: %s'%loss_Gcam_info)
print('---------LatentSpace--------')
print('loss_w_info: %s'%loss_w_info)
print('loss_c_info: %s'%loss_c_info)
it_d += 1
writer.add_scalar('loss_mask_mse', loss_mask_info[0][0], global_step=it_d)
writer.add_scalar('loss_mask_mse_mean', loss_mask_info[0][1], global_step=it_d)
writer.add_scalar('loss_mask_mse_std', loss_mask_info[0][2], global_step=it_d)
writer.add_scalar('loss_mask_kl', loss_mask_info[1], global_step=it_d)
writer.add_scalar('loss_mask_cosine', loss_mask_info[2], global_step=it_d)
writer.add_scalar('loss_mask_ssim', loss_mask_info[3], global_step=it_d)
writer.add_scalar('loss_mask_lpips', loss_mask_info[4], global_step=it_d)
writer.add_scalar('loss_grad_mse', loss_grad_info[0][0], global_step=it_d)
writer.add_scalar('loss_grad_mse_mean', loss_grad_info[0][1], global_step=it_d)
writer.add_scalar('loss_grad_mse_std', loss_grad_info[0][2], global_step=it_d)
writer.add_scalar('loss_grad_kl', loss_grad_info[1], global_step=it_d)
writer.add_scalar('loss_grad_cosine', loss_grad_info[2], global_step=it_d)
writer.add_scalar('loss_grad_ssim', loss_grad_info[3], global_step=it_d)
writer.add_scalar('loss_grad_lpips', loss_grad_info[4], global_step=it_d)
writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d)
writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d)
writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d)
writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d)
writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d)
writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d)
writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d)
writer.add_scalar('loss_Gcam', loss_Gcam_info[0][0], global_step=it_d)
writer.add_scalar('loss_Gcam_mean', loss_Gcam_info[0][1], global_step=it_d)
writer.add_scalar('loss_Gcam_std', loss_Gcam_info[0][2], global_step=it_d)
writer.add_scalar('loss_Gcam_kl', loss_Gcam_info[1], global_step=it_d)
writer.add_scalar('loss_Gcam_cosine', loss_Gcam_info[2], global_step=it_d)
writer.add_scalar('loss_Gcam_ssim', loss_Gcam_info[3], global_step=it_d)
writer.add_scalar('loss_Gcam_lpips', loss_Gcam_info[4], global_step=it_d)
writer.add_scalar('loss_w_mse', loss_w_info[0][0], global_step=it_d)
writer.add_scalar('loss_w_mse_mean', loss_w_info[0][1], global_step=it_d)
writer.add_scalar('loss_w_mse_std', loss_w_info[0][2], global_step=it_d)
writer.add_scalar('loss_w_kl', loss_w_info[1], global_step=it_d)
writer.add_scalar('loss_w_cosine', loss_w_info[2], global_step=it_d)
writer.add_scalar('loss_w_ssim', loss_w_info[3], global_step=it_d)
writer.add_scalar('loss_w_lpips', loss_w_info[4], global_step=it_d)
writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d)
writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d)
writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], global_step=it_d)
writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d)
writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d)
writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d)
writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d)
writer.add_scalars('Image_Space_MSE', {'loss_mask_mse':loss_mask_info[0][0],'loss_grad_mse':loss_grad_info[0][0],'loss_img_mse':loss_imgs_info[0][0]}, global_step=it_d)
writer.add_scalars('Image_Space_KL', {'loss_mask_kl':loss_mask_info[1],'loss_grad_kl':loss_grad_info[1],'loss_imgs_kl':loss_imgs_info[1]}, global_step=it_d)
writer.add_scalars('Image_Space_Cosine', {'loss_mask_cosine':loss_mask_info[2],'loss_grad_cosine':loss_grad_info[2],'loss_imgs_cosine':loss_imgs_info[2]}, global_step=it_d)
writer.add_scalars('Image_Space_SSIM', {'loss_mask_ssim':loss_mask_info[3],'loss_grad_ssim':loss_grad_info[3],'loss_img_ssim':loss_imgs_info[3]}, global_step=it_d)
writer.add_scalars('Image_Space_Lpips', {'loss_mask_lpips':loss_mask_info[4],'loss_grad_lpips':loss_grad_info[4],'loss_img_lpips':loss_imgs_info[4]}, global_step=it_d)
writer.add_scalars('Latent Space W', {'loss_w_mse':loss_w_info[0][0],'loss_w_mse_mean':loss_w_info[0][1],'loss_w_mse_std':loss_w_info[0][2],'loss_w_kl':loss_w_info[1],'loss_w_cosine':loss_w_info[2]}, global_step=it_d)
        writer.add_scalars('Latent Space C', {'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_c_info[1],'loss_c_cosine':loss_c_info[2]}, global_step=it_d)
if iteration % 100 == 0:
n_row = batch_size
test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5
torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.png'%(iteration//30000,iteration%30000),nrow=n_row) # nrow=3
heatmap=torch.cat((heatmap_1,heatmap_2))
cam=torch.cat((cam_1,cam_2))
grads = torch.cat((grad_1,grad_2))
grads = grads.data.cpu().numpy() # [n,c,h,w]
            grads -= np.min(grads) # shift to non-negative before scaling
grads /= np.max(grads)
torchvision.utils.save_image(torch.tensor(heatmap),resultPath_grad_cam+'/heatmap_%d.png'%(iteration),nrow=n_row)
torchvision.utils.save_image(torch.tensor(cam),resultPath_grad_cam+'/cam_%d.png'%(iteration),nrow=n_row)
torchvision.utils.save_image(torch.tensor(grads),resultPath_grad_cam+'/gb_%d.png'%(iteration),nrow=n_row)
with open(resultPath+'/Loss.txt', 'a+') as f:
print('i_'+str(iteration),file=f)
print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f)
print('---------ImageSpace--------',file=f)
print('loss_mask_info: %s'%loss_mask_info,file=f)
print('loss_grad_info: %s'%loss_grad_info,file=f)
print('loss_imgs_info: %s'%loss_imgs_info,file=f)
print('loss_Gcam_info: %s'%loss_Gcam_info,file=f)
print('---------LatentSpace--------',file=f)
print('loss_w_info: %s'%loss_w_info,file=f)
print('loss_c_info: %s'%loss_c_info,file=f)
if iteration % 5000 == 0:
torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000))
#torch.save(Gm.buffer1,resultPath1_2+'/center_tensor_iter%d.pt'%iteration)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='the training args')
parser.add_argument('--iterations', type=int, default=120001)
parser.add_argument('--lr', type=float, default=0.0015)
parser.add_argument('--beta_1', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--experiment_dir', default=None)
parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v2/stylegan2_cat256.pth')
parser.add_argument('--config_dir', default='./checkpoint/biggan/256/biggan-deep-256-config.json') # BigGAN needs it
parser.add_argument('--checkpoint_dir_E', default=None)#'./result/StyleGAN2-CAT256-MisAligned-solveDetach&Clone-FronterImageVecvtors/models/E_model_ep0_iter20000.pth'
parser.add_argument('--img_size',type=int, default=256)
parser.add_argument('--img_channels', type=int, default=3)# RGB:3 ,L:1
parser.add_argument('--z_dim', type=int, default=512) # BigGAN,z=128, PGGAN and StyleGANs = 512
    parser.add_argument('--mtype', type=int, default=2) # StyleGANv1=1, StyleGANv2=2, PGGAN=3, BigGAN=4
parser.add_argument('--start_features', type=int, default=64) # 16->1024 32->512 64->256
args = parser.parse_args()
if not os.path.exists('./result'): os.mkdir('./result')
resultPath = args.experiment_dir
    if resultPath is None:
resultPath = "./result/StyleGAN2-Cat256-Case2-MisAligned-w"
if not os.path.exists(resultPath): os.mkdir(resultPath)
resultPath1_1 = resultPath+"/imgs"
if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1)
resultPath1_2 = resultPath+"/models"
if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2)
resultPath_grad_cam = resultPath+"/grad_cam"
if not os.path.exists(resultPath_grad_cam): os.mkdir(resultPath_grad_cam)
use_gpu = True
device = torch.device("cuda" if use_gpu else "cpu")
writer_path = os.path.join(resultPath, './summaries')
if not os.path.exists(writer_path): os.mkdir(writer_path)
writer = tensorboardX.SummaryWriter(writer_path)
train(tensor_writer=writer, args= args)
|
1648636
|
import datetime as dt
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pds
# Orbits period is a pandas.Timedelta kwarg, and the pandas repr
# does not include a module name. Import required to run eval
# on Orbit representation
from pandas import Timedelta # noqa: F401
import pytest
import pysat
class TestOrbitsUserInterface():
def setup(self):
""" Set up User Interface unit tests
"""
self.in_args = ['pysat', 'testing']
self.in_kwargs = {'clean_level': 'clean', 'update_files': True}
self.testInst = None
self.stime = dt.datetime(2009, 1, 1)
def teardown(self):
""" Tear down user interface tests
"""
del self.in_args, self.in_kwargs, self.testInst, self.stime
def test_orbit_w_bad_kind(self):
""" Test orbit failure with bad 'kind' input
"""
self.in_kwargs['orbit_info'] = {'index': 'mlt', 'kind': 'cats'}
with pytest.raises(ValueError):
self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
@pytest.mark.parametrize("info", [({'index': 'magnetic local time',
'kind': 'longitude'}),
(None),
({'index': 'magnetic local time',
'kind': 'lt'}),
({'index': 'magnetic local time',
'kind': 'polar'}),
({'index': 'magnetic local time',
'kind': 'orbit'})])
def test_orbit_w_bad_orbit_info(self, info):
""" Test orbit failure on iteration with orbit initialization
"""
self.in_kwargs['orbit_info'] = info
self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
self.testInst.load(date=self.stime)
with pytest.raises(ValueError):
self.testInst.orbits.next()
@pytest.mark.parametrize("info", [({'index': 'magnetic local time',
'kind': 'polar'}),
({'index': 'magnetic local time',
'kind': 'orbit'}),
({'index': 'magnetic local time',
'kind': 'longitude'}),
({'index': 'magnetic local time',
'kind': 'lt'})])
def test_orbit_polar_w_missing_orbit_index(self, info):
""" Test orbit failure oon iteration with missing orbit index
"""
self.in_kwargs['orbit_info'] = info
self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
# Force index to None before loading and iterating
self.testInst.orbits.orbit_index = None
self.testInst.load(date=self.stime)
with pytest.raises(ValueError):
self.testInst.orbits.next()
def test_orbit_repr(self):
""" Test the Orbit representation
"""
self.in_kwargs['orbit_info'] = {'index': 'mlt'}
self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
out_str = self.testInst.orbits.__repr__()
assert out_str.find("Orbits(") >= 0
def test_orbit_str(self):
""" Test the Orbit string representation with data
"""
self.in_kwargs['orbit_info'] = {'index': 'mlt'}
self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
self.testInst.load(date=self.stime)
out_str = self.testInst.orbits.__str__()
assert out_str.find("Orbit Settings") >= 0
assert out_str.find("Orbit Lind: local time") < 0
class TestSpecificUTOrbits():
def setup(self):
"""Runs before every method to create a clean testing setup
"""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'mlt'},
update_files=True)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
self.inc_min = 97
self.etime = None
def teardown(self):
"""Runs after every method to clean up previous testing
"""
del self.testInst, self.stime, self.inc_min, self.etime
@pytest.mark.parametrize('orbit_inc', [(0), (1), (-1), (-2), (14)])
def test_single_orbit_call_by_index(self, orbit_inc):
"""Test successful orbit call by index
"""
# Load the data
self.testInst.load(date=self.stime)
self.testInst.orbits[orbit_inc]
# Increment the time
if orbit_inc >= 0:
self.stime += dt.timedelta(minutes=orbit_inc * self.inc_min)
else:
self.stime += dt.timedelta(minutes=self.inc_min
* (np.ceil(1440.0 / self.inc_min)
+ orbit_inc))
self.etime = self.stime + dt.timedelta(seconds=(self.inc_min * 60 - 1))
# Test the time
assert (self.testInst.index[0] == self.stime)
assert (self.testInst.index[-1] == self.etime)
@pytest.mark.parametrize("orbit_ind,raise_err", [(17, Exception),
(None, TypeError)])
def test_single_orbit_call_bad_index(self, orbit_ind, raise_err):
""" Test orbit failure with bad index
"""
self.testInst.load(date=self.stime)
with pytest.raises(raise_err):
self.testInst.orbits[orbit_ind]
def test_orbit_number_via_current_multiple_orbit_calls_in_day(self):
""" Test orbit number with multiple orbit calls in a day
"""
self.testInst.load(date=self.stime)
self.testInst.bounds = (self.stime, None)
true_vals = np.arange(15)
true_vals[-1] = 0
test_vals = []
for i, inst in enumerate(self.testInst.orbits):
if i > 14:
break
test_vals.append(inst.orbits.current)
assert inst.orbits.current == self.testInst.orbits.current
assert np.all(test_vals == true_vals)
def test_all_single_orbit_calls_in_day(self):
""" Test all single orbit calls in a day
"""
self.testInst.load(date=self.stime)
self.testInst.bounds = (self.stime, None)
for i, inst in enumerate(self.testInst.orbits):
if i > 14:
break
# Test the start index
self.etime = self.stime + i * relativedelta(minutes=self.inc_min)
assert inst.index[0] == self.etime
assert self.testInst.index[0] == self.etime
# Test the end index
self.etime += relativedelta(seconds=((self.inc_min * 60) - 1))
assert inst.index[-1] == self.etime
assert self.testInst.index[-1] == self.etime
def test_orbit_next_call_no_loaded_data(self):
""" Test orbit next call without loading data
"""
self.testInst.orbits.next()
assert (self.testInst.index[0] == dt.datetime(2008, 1, 1))
assert (self.testInst.index[-1] == dt.datetime(2008, 1, 1, 0, 38, 59))
def test_orbit_prev_call_no_loaded_data(self):
""" Test orbit previous call without loading data
"""
self.testInst.orbits.prev()
# this isn't a full orbit
assert (self.testInst.index[-1]
== dt.datetime(2010, 12, 31, 23, 59, 59))
assert (self.testInst.index[0] == dt.datetime(2010, 12, 31, 23, 49))
def test_single_orbit_call_orbit_starts_0_UT_using_next(self):
""" Test orbit next call with data
"""
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
self.etime = self.stime + dt.timedelta(seconds=(self.inc_min * 60 - 1))
assert (self.testInst.index[0] == self.stime)
assert (self.testInst.index[-1] == self.etime)
def test_single_orbit_call_orbit_starts_0_UT_using_prev(self):
""" Test orbit prev call with data
"""
self.testInst.load(date=self.stime)
self.testInst.orbits.prev()
self.stime += 14 * relativedelta(minutes=self.inc_min)
self.etime = self.stime + dt.timedelta(seconds=((self.inc_min * 60)
- 1))
assert self.testInst.index[0] == self.stime
assert self.testInst.index[-1] == self.etime
def test_single_orbit_call_orbit_starts_off_0_UT_using_next(self):
""" Test orbit next call with data for previous day
"""
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
assert (self.testInst.index[0] == dt.datetime(2008, 12, 30, 23, 45))
assert (self.testInst.index[-1]
== (dt.datetime(2008, 12, 30, 23, 45)
+ relativedelta(seconds=(self.inc_min * 60 - 1))))
def test_single_orbit_call_orbit_starts_off_0_UT_using_prev(self):
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.prev()
assert (self.testInst.index[0]
== (dt.datetime(2009, 1, 1)
- relativedelta(minutes=self.inc_min)))
assert (self.testInst.index[-1]
== (dt.datetime(2009, 1, 1) - relativedelta(seconds=1)))
class TestGeneralOrbitsMLT():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'mlt'},
update_files=True)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
return
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
return
def test_equality_with_copy(self):
"""Test that copy is the same as original"""
self.out = self.testInst.orbits.copy()
assert self.out == self.testInst.orbits
return
def test_equality_with_data_with_copy(self):
"""Test that copy is the same as original"""
# Load data
self.testInst.load(date=self.stime)
# Load up an orbit
self.testInst.orbits[0]
self.out = self.testInst.orbits.copy()
assert self.out == self.testInst.orbits
return
def test_inequality_different_data(self):
"""Test that equality is false if different data"""
# Load data
self.testInst.load(date=self.stime)
# Load up an orbit
self.testInst.orbits[0]
# Make copy
self.out = self.testInst.orbits.copy()
# Modify data
self.out._full_day_data = self.testInst._null_data
assert self.out != self.testInst.orbits
return
def test_inequality_modified_object(self):
"""Test that equality is false if other missing attributes"""
self.out = self.testInst.orbits.copy()
# Remove attribute
del self.out.orbit_index
assert self.testInst.orbits != self.out
return
def test_inequality_reduced_object(self):
"""Test that equality is false if self missing attributes"""
self.out = self.testInst.orbits.copy()
self.out.hi_there = 'hi'
assert self.testInst.orbits != self.out
return
def test_inequality_different_type(self):
"""Test that equality is false if different type"""
assert self.testInst.orbits != self.testInst
return
def test_eval_repr(self):
"""Test eval of repr recreates object"""
# eval and repr don't play nice for custom functions
if len(self.testInst.custom_functions) != 0:
self.testInst.custom_clear()
self.out = eval(self.testInst.orbits.__repr__())
assert self.out == self.testInst.orbits
return
def test_repr_and_copy(self):
"""Test repr consistent with object copy"""
# Not tested with eval due to issues with datetime
self.out = self.testInst.orbits.__repr__()
second_out = self.testInst.orbits.copy().__repr__()
assert self.out == second_out
return
def test_load_orbits_w_empty_data(self):
""" Test orbit loading outside of the instrument data range
"""
self.stime -= dt.timedelta(days=365 * 100)
self.testInst.load(date=self.stime)
self.testInst.orbits[0]
with pytest.raises(StopIteration):
self.testInst.orbits.next()
def test_less_than_one_orbit_of_data(self):
"""Test successful load with less than one orbit of data
"""
def filter_data(inst):
""" Local helper function to reduce available data
"""
inst.data = inst[0:20]
self.testInst.custom_attach(filter_data)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
# a recursion issue has been observed in this area
# checking for date to limit reintroduction potential
assert self.testInst.date == self.stime
def test_less_than_one_orbit_of_data_two_ways(self):
def filter_data(inst):
inst.data = inst[0:5]
self.testInst.custom_attach(filter_data)
self.testInst.load(date=self.stime)
# starting from no orbit calls next loads first orbit
self.testInst.orbits.next()
# store comparison data
saved_data = self.testInst.copy()
self.testInst.load(date=self.stime)
self.testInst.orbits[0]
assert all(self.testInst.data == saved_data.data)
# a recursion issue has been observed in this area
# checking for date to limit reintroduction potential
d1check = self.testInst.date == saved_data.date
assert d1check
def test_less_than_one_orbit_of_data_four_ways_two_days(self):
""" Test successful loading of different parital orbits
"""
# create situation where the < 1 orbit split across two days
def filter_data(inst):
"""Local function for breaking up orbits
"""
if inst.date == dt.datetime(2009, 1, 5):
inst.data = inst[0:20]
elif inst.date == dt.datetime(2009, 1, 4):
inst.data = inst[-20:]
return
self.testInst.custom_attach(filter_data)
self.stime += dt.timedelta(days=3)
self.testInst.load(date=self.stime)
# starting from no orbit calls next loads first orbit
self.testInst.orbits.next()
# store comparison data
saved_data = self.testInst.copy()
self.testInst.load(date=self.stime + dt.timedelta(days=1))
self.testInst.orbits[0]
if self.testInst.orbits.num == 1:
# equivalence only when only one orbit
# some test settings can violate this assumption
assert all(self.testInst.data == saved_data.data)
self.testInst.load(date=self.stime)
self.testInst.orbits[0]
assert all(self.testInst.data == saved_data.data)
self.testInst.load(date=self.stime + dt.timedelta(days=1))
self.testInst.orbits.prev()
if self.testInst.orbits.num == 1:
assert all(self.testInst.data == saved_data.data)
# a recursion issue has been observed in this area
# checking for date to limit reintroduction potential
d1check = self.testInst.date == saved_data.date
assert d1check
def test_repeated_orbit_calls_symmetric_single_day_start_with_last(self):
self.testInst.load(date=self.stime)
# start on last orbit of last day
self.testInst.orbits[0]
self.testInst.orbits.prev()
control = self.testInst.copy()
for j in range(10):
self.testInst.orbits.next()
for j in range(10):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_symmetric_single_day_0_UT(self):
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(10):
self.testInst.orbits.next()
for j in range(10):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_symmetric_multi_day_0_UT(self):
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(20):
self.testInst.orbits.next()
for j in range(20):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_symmetric_single_day_off_0_UT(self):
""" Test successful orbit calls for a day about a time off 00:00 UT
"""
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(10):
self.testInst.orbits.next()
for j in range(10):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_symmetric_multi_day_off_0_UT(self):
""" Test successful orbit calls for days about a time off 00:00 UT
"""
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(20):
self.testInst.orbits.next()
for j in range(20):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_antisymmetric_multi_day_off_0_UT(self):
""" Test successful orbit calls for different days about a time off 0 UT
"""
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(10):
self.testInst.orbits.next()
for j in range(20):
self.testInst.orbits.prev()
for j in range(10):
self.testInst.orbits.next()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_antisymmetric_multi_multi_day_off_0_UT(self):
""" Test successful orbit calls for more days about a time off 0 UT
"""
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(20):
self.testInst.orbits.next()
for j in range(40):
self.testInst.orbits.prev()
for j in range(20):
self.testInst.orbits.next()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_antisymmetric_multi_day_0_UT(self):
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(10):
self.testInst.orbits.next()
for j in range(20):
self.testInst.orbits.prev()
for j in range(10):
self.testInst.orbits.next()
assert all(control.data == self.testInst.data)
def test_repeated_orbit_calls_antisymmetric_multi_multi_day_0_UT(self):
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(20):
self.testInst.orbits.next()
for j in range(40):
self.testInst.orbits.prev()
for j in range(20):
self.testInst.orbits.next()
assert all(control.data == self.testInst.data)
def test_repeat_orbit_calls_asym_multi_day_0_UT_long_time_gap(self):
"""Test successful orbit calls for many different days with a long gap
"""
self.stime += dt.timedelta(days=334)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(20):
self.testInst.orbits.next()
for j in range(20):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeat_orbit_calls_asym_multi_day_0_UT_really_long_time_gap(self):
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
for j in range(400):
self.testInst.orbits.next()
for j in range(400):
self.testInst.orbits.prev()
assert all(control.data == self.testInst.data)
def test_repeat_orbit_calls_asym_multi_day_0_UT_multiple_time_gaps(self):
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
control = self.testInst.copy()
n_time = []
p_time = []
for j in range(40):
n_time.append(self.testInst.index[0])
self.testInst.orbits.next()
for j in range(40):
self.testInst.orbits.prev()
p_time.append(self.testInst.index[0])
check = np.all(p_time == n_time[::-1])
assert all(control.data == self.testInst.data) & check
class TestGeneralOrbitsMLTxarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'mlt'},
update_files=True)
self.stime = pysat.instruments.pysat_testing_xarray._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestGeneralOrbitsNonStandardIteration():
"""Create an iteration window that is larger than step size.
Ensure the overlapping data doesn't end up in the orbit iteration."""
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'mlt'},
update_files=True)
self.testInst.bounds = (self.testInst.files.files.index[0],
self.testInst.files.files.index[11],
'2D', dt.timedelta(days=3))
self.orbit_starts = []
self.orbit_stops = []
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.orbit_starts, self.orbit_stops
def test_no_orbit_overlap_with_overlapping_iteration(self):
"""Ensure error when overlap in iteration data."""
with pytest.raises(ValueError):
self.testInst.orbits.next()
return
@pytest.mark.parametrize("bounds_type", ['by_date', 'by_file'])
def test_no_orbit_overlap_with_nonoverlapping_iteration(self, bounds_type):
"""Test no orbit data overlap when overlap in iteration data"""
if bounds_type == 'by_date':
bounds = (self.testInst.files.files.index[0],
self.testInst.files.files.index[11],
'2D', dt.timedelta(days=2))
elif bounds_type == 'by_file':
bounds = (self.testInst.files[0], self.testInst.files[11], 2, 2)
self.testInst.bounds = bounds
for inst in self.testInst.orbits:
self.orbit_starts.append(inst.index[0])
self.orbit_stops.append(inst.index[-1])
self.orbit_starts = pds.Series(self.orbit_starts)
self.orbit_stops = pds.Series(self.orbit_stops)
assert self.orbit_starts.is_monotonic_increasing
assert self.orbit_stops.is_monotonic_increasing
return
class TestGeneralOrbitsLong(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'longitude',
'kind': 'longitude'},
update_files=True)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestGeneralOrbitsLongxarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'longitude',
'kind': 'longitude'},
update_files=True)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestGeneralOrbitsOrbitNumber(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'orbit_num',
'kind': 'orbit'},
update_files=True)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestGeneralOrbitsOrbitNumberXarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'orbit_num',
'kind': 'orbit'},
update_files=True)
self.stime = pysat.instruments.pysat_testing_xarray._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestGeneralOrbitsLatitude(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'latitude',
'kind': 'polar'},
update_files=True)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestGeneralOrbitsLatitudeXarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'latitude',
'kind': 'polar'},
update_files=True)
self.stime = pysat.instruments.pysat_testing_xarray._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
def filter_data(inst):
"""Remove data from instrument, simulating gaps"""
times = [[dt.datetime(2009, 1, 1, 1, 37), dt.datetime(2009, 1, 1, 3, 14)],
[dt.datetime(2009, 1, 1, 10), dt.datetime(2009, 1, 1, 12)],
[dt.datetime(2009, 1, 1, 22), dt.datetime(2009, 1, 2, 2)],
[dt.datetime(2009, 1, 13), dt.datetime(2009, 1, 15)],
[dt.datetime(2009, 1, 20, 1), dt.datetime(2009, 1, 25, 23)],
[dt.datetime(2009, 1, 25, 23, 30), dt.datetime(2009, 1, 26, 3)]
]
for time in times:
idx, = np.where((inst.index > time[1]) | (inst.index < time[0]))
inst.data = inst[idx]
def filter_data2(inst, times=None):
"""Remove data from instrument, simulating gaps"""
for time in times:
idx, = np.where((inst.index > time[1]) | (inst.index < time[0]))
inst.data = inst[idx]
class TestOrbitsGappyData(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'mlt'},
update_files=True)
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyDataXarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'mlt'},
update_files=True)
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyData2(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'mlt'})
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
times = [[dt.datetime(2008, 12, 31, 4),
dt.datetime(2008, 12, 31, 5, 37)],
[dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 1, 1, 37)]]
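# Append 38 additional gaps, one per day starting 2 Jan 2009; each gap spans roughly
# 1 hour 37 minutes (the loop variable doubles as the day offset and a small extra
# number of seconds).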
for seconds in np.arange(38):
day = (dt.datetime(2009, 1, 2)
+ dt.timedelta(days=int(seconds)))
times.append([day, day
+ dt.timedelta(hours=1, minutes=37,
seconds=int(seconds))
- dt.timedelta(seconds=20)])
self.testInst.custom_attach(filter_data2, kwargs={'times': times})
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyData2Xarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'mlt'})
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
times = [[dt.datetime(2008, 12, 31, 4),
dt.datetime(2008, 12, 31, 5, 37)],
[dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 1, 1, 37)]]
for seconds in np.arange(38):
day = (dt.datetime(2009, 1, 2)
+ dt.timedelta(days=int(seconds)))
times.append([day, day
+ dt.timedelta(hours=1, minutes=37,
seconds=int(seconds))
- dt.timedelta(seconds=20)])
self.testInst.custom_attach(filter_data2, kwargs={'times': times})
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyLongData(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'longitude',
'kind': 'longitude'})
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyLongDataXarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'longitude',
'kind': 'longitude'})
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyOrbitNumData(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'orbit_num',
'kind': 'orbit'})
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyOrbitNumDataXarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'orbit_num',
'kind': 'orbit'})
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyOrbitLatData(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean',
orbit_info={'index': 'latitude',
'kind': 'polar'})
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
class TestOrbitsGappyOrbitLatDataXarray(TestGeneralOrbitsMLT):
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing_xarray',
clean_level='clean',
orbit_info={'index': 'latitude',
'kind': 'polar'})
self.testInst.custom_attach(filter_data)
self.stime = pysat.instruments.pysat_testing._test_dates['']['']
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.stime
|
1648704
|
import os
import json
import kfp
import fire
from datetime import datetime
def update_op_project_id_img(op):
project_id = os.getenv('PROJECT_ID')
if not project_id:
raise Exception('Please set a $PROJECT_ID env value.')
img = op.component_spec.implementation.container.image
img = img.format(PROJECT_ID=project_id)
op.component_spec.implementation.container.image = img
return op
def get_pipe_by_name(client, name):
# Try to find the pipeline by name; if the lookup fails, assume the pipeline doesn't exist.
try:
pipes = client.list_pipelines()
pipeline = [pipe for pipe in pipes.pipelines if pipe.name == name]
except Exception:
pipeline = None
if pipeline:
pipeline = pipeline[0]
return pipeline
def deploy_pipeline(host, version):
client = kfp.Client(host=host)
name = f'pysearchml_{version}'
# It is assumed that page_token is not necessary for this application
pipeline = get_pipe_by_name(client, name)
if not pipeline:
pipeline = client.upload_pipeline(
pipeline_package_path='pipeline.tar.gz',
pipeline_name=name
)
def run_experiment(host, version, experiment_name):
client = kfp.Client(host=host)
name = f'pysearchml_{version}'
pipeline = get_pipe_by_name(client, name)
if not pipeline:
raise Exception('Please first create a pipeline before running')
run_id = f'experiment_{datetime.now().strftime("%Y%m%d-%H%M%S")}'
experiment = client.create_experiment(name=experiment_name)
params = json.loads(open('params.json').read())
client.run_pipeline(experiment.id, job_name=run_id, params=params,
pipeline_id=pipeline.id)
def main(action, host, **kwargs):
if action == 'deploy-pipeline':
version = kwargs.get('version')
deploy_pipeline(host, version)
elif action == 'run-pipeline':
version = kwargs.get('version')
experiment_name = kwargs['experiment_name']
run_experiment(host, version, experiment_name)
else:
raise ValueError(f'Invalid operation name: {action}.')
if __name__ == '__main__':
fire.Fire(main)
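# Usage sketch (hypothetical filename; Python Fire maps CLI arguments onto main's signature):
#   python deploy.py deploy-pipeline http://<kfp-host> --version=0.1
#   python deploy.py run-pipeline http://<kfp-host> --version=0.1 --experiment_name=my_experiment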
|
1648749
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def read_data(input_file, index):
# Read the data from the input file
input_data = np.loadtxt(input_file, delimiter=',')
# Lambda function to convert strings to Pandas date format
to_date = lambda x, y: str(int(x)) + '-' + str(int(y))
# Extract the start date
start = to_date(input_data[0, 0], input_data[0, 1])
# Extract the end date
if input_data[-1, 1] == 12:
year = input_data[-1, 0] + 1
month = 1
else:
year = input_data[-1, 0]
month = input_data[-1, 1] + 1
end = to_date(year, month)
# Create a date list with a monthly frequency
date_indices = pd.date_range(start, end, freq='M')
# Add timestamps to the input data to create time-series data
output = pd.Series(input_data[:, index], index=date_indices)
return output
if __name__=='__main__':
# Input filename
input_file = 'data_2D.txt'
# Specify the columns that need to be converted
# into time-series data
indices = [2, 3]
# Iterate through the columns and plot the data
for index in indices:
# Convert the column to timeseries format
timeseries = read_data(input_file, index)
# Plot the data
plt.figure()
timeseries.plot()
plt.title('Dimension ' + str(index - 1))
plt.show()
|
1648796
|
from typing import List
import pandas as pd
from ..util import io
__all__ = [
"read_acc_file_into_df",
"read_bvp_file_into_df",
"read_eda_file_into_df",
"read_hr_file_into_df",
"read_ibi_file_into_df",
"read_temp_file_into_df",
]
def read_hr_file_into_df(filepath_or_buffer) -> pd.DataFrame:
return __read_frequency_based_file_into_df(filepath_or_buffer, ['hr'])
def read_eda_file_into_df(filepath_or_buffer) -> pd.DataFrame:
return __read_frequency_based_file_into_df(filepath_or_buffer, ['eda'])
def read_bvp_file_into_df(filepath_or_buffer) -> pd.DataFrame:
return __read_frequency_based_file_into_df(filepath_or_buffer, ['bvp'])
def read_temp_file_into_df(filepath_or_buffer) -> pd.DataFrame:
return __read_frequency_based_file_into_df(filepath_or_buffer, ['temp'])
def read_acc_file_into_df(filepath_or_buffer) -> pd.DataFrame:
return __read_frequency_based_file_into_df(filepath_or_buffer, ['acc_x', 'acc_y', 'acc_z'])
def __read_frequency_based_file_into_df(filepath_or_buffer, column_names: List[str]) -> pd.DataFrame:
if io.is_file_like(filepath_or_buffer):
file_to_read = filepath_or_buffer
close_file = False
elif isinstance(filepath_or_buffer, str):
file_to_read = open(filepath_or_buffer, 'br')
close_file = True
else:
raise ValueError('Illegal input type: %s' % type(filepath_or_buffer))
initial_pos = file_to_read.tell()
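# Empatica frequency-based CSV layout: the first line holds the session start time as a
# Unix timestamp, the second line the sampling frequency in Hz, and every following row
# is one sample per column. The two header lines are parsed here, then the file is rewound
# and those rows are skipped when reading the samples.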
timestamp = pd.to_datetime(float(str(file_to_read.readline(), 'utf-8').strip().split(',')[0]), unit='s', utc=True)
frequency = float(str(file_to_read.readline(), 'utf-8').strip().split(',')[0])
file_to_read.seek(initial_pos)
data = pd.read_csv(file_to_read, skiprows=2, names=column_names, index_col=False)
data.index = pd.date_range(start=timestamp, periods=len(data), freq=str(1 / frequency * 1000) + 'ms',
name='datetime', tz='UTC')
data.sort_index(inplace=True)
if close_file:
file_to_read.close()
return data
def read_ibi_file_into_df(filepath_or_buffer) -> pd.DataFrame:
"""
Reads an Empatica IBI file into a DataFrame.
Parameters
----------
filepath_or_buffer
filepath as string or buffer (file)
Returns
-------
IBIs : pd.DataFrame
a pd.DataFrame with an 'ibi' columns, IBIs in milliseconds
"""
if io.is_file_like(filepath_or_buffer):
file_to_read = filepath_or_buffer
close_file = False
elif isinstance(filepath_or_buffer, str):
file_to_read = open(filepath_or_buffer, 'br')
close_file = True
else:
raise ValueError('Illegal input type: %s' % type(filepath_or_buffer))
initial_pos = file_to_read.tell()
timestamp = float(str(file_to_read.readline(), 'utf-8').strip().split(',')[0])
file_to_read.seek(initial_pos)
ibi = pd.read_csv(file_to_read, skiprows=1, names=['ibi'], index_col=0)
ibi['ibi'] *= 1000 # to get ms
ibi.index = pd.to_datetime((ibi.index * 1000 + timestamp * 1000).map(int), unit='ms', utc=True)
ibi.index.name = 'datetime'
if close_file:
file_to_read.close()
return ibi
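# Minimal usage sketch (hypothetical file paths; Empatica exports typically name them
# HR.csv and IBI.csv):
#   hr_df = read_hr_file_into_df('HR.csv')    # 'hr' column, UTC DatetimeIndex
#   ibi_df = read_ibi_file_into_df('IBI.csv') # 'ibi' column in milliseconds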
|
1648829
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import encoder, bts
from ..utils import load_weights
from utils import log_info
class Model(nn.Module):
def __init__(self, dataset, max_depth, model_weights_file, seed=None):
super(Model, self).__init__()
if model_weights_file != 'scratch':
assert os.path.exists(model_weights_file)
from_scratch = False
else:
from_scratch = True
if seed is not None:
torch.manual_seed(seed)
self.encoder = encoder()
self.decoder = bts(self.encoder.feat_out_channels, dataset, max_depth)
self.dataset = dataset
if not from_scratch:
load_weights(self, model_weights_file)
else:
log_info('START FROM SCRATCH')
def forward(self, x, focal=None):
if focal is None:
assert self.dataset == 'nyuv2'
focal = 518.8579
skip_feat = self.encoder(x)
depth_8x8_scaled, depth_4x4_scaled, depth_2x2_scaled, reduc1x1, final_depth = self.decoder(skip_feat, focal)
return final_depth
def get_features(self, x, layer_list, focal=None, is_training=False):
return_list = []
if self.dataset == 'nyuv2':
focal = 518.8579
def forward(x, layer_list):
skip_feat = self.encoder(x)
skip0, skip1, skip2, skip3 = skip_feat[1], skip_feat[2], skip_feat[3], skip_feat[4] # 64, 256, 512, 1024 | H/2, H/4, H/8, H/16
dense_features = torch.nn.ReLU()(skip_feat[5]) # B, 2048, H/32, W/32
upconv5 = self.decoder.upconv5(dense_features) # B, 512, H/16, W/16
upconv5 = self.decoder.bn5(upconv5)
concat5 = torch.cat([upconv5, skip3], dim=1) # B, 1536, H/16, W/16
iconv5 = self.decoder.conv5(concat5) # B, 512, H/16, W/16
upconv4 = self.decoder.upconv4(iconv5) # H/8
upconv4 = self.decoder.bn4(upconv4) # B, 256, H/8, W/8
concat4 = torch.cat([upconv4, skip2], dim=1) # B, 768, H/8, W/8
iconv4 = self.decoder.conv4(concat4)
iconv4 = self.decoder.bn4_2(iconv4) # B, 256, H/8, W/8
daspp_3 = self.decoder.daspp_3(iconv4) # B, 128, H/8, W/8
concat4_2 = torch.cat([concat4, daspp_3], dim=1) # B, 896, H/8, W/8
daspp_6 = self.decoder.daspp_6(concat4_2) # B, 128, H/8, W/8
concat4_3 = torch.cat([concat4_2, daspp_6], dim=1) # B, 1024, H/8, W/8
daspp_12 = self.decoder.daspp_12(concat4_3) # B, 128, H/8, W/8
concat4_4 = torch.cat([concat4_3, daspp_12], dim=1) # B, 1152, H/8, W/8
daspp_18 = self.decoder.daspp_18(concat4_4) # B, 128, H/8, W/8
concat4_5 = torch.cat([concat4_4, daspp_18], dim=1)
daspp_24 = self.decoder.daspp_24(concat4_5)
concat4_daspp = torch.cat([iconv4, daspp_3, daspp_6, daspp_12, daspp_18, daspp_24],
dim=1) # B, 896, H/8, W/8
daspp_feat = self.decoder.daspp_conv(concat4_daspp) # B, 128, H/8, W/8
reduc8x8 = self.decoder.reduc8x8(daspp_feat) # B, 4, H/8, W/8
plane_normal_8x8 = reduc8x8[:, :3, :, :] # B, 3, H/8, W/8
plane_normal_8x8 = F.normalize(plane_normal_8x8, 2, 1)
plane_dist_8x8 = reduc8x8[:, 3, :, :]
plane_eq_8x8 = torch.cat([plane_normal_8x8, plane_dist_8x8.unsqueeze(1)], 1) # B, 4, H/8, W/8
depth_8x8 = self.decoder.lpg8x8(plane_eq_8x8, focal) # B, H, W
depth_8x8_scaled = depth_8x8.unsqueeze(1) / self.decoder.max_depth
depth_8x8_scaled_ds = F.interpolate(depth_8x8_scaled, scale_factor=0.25,
mode='nearest') # B, 1, H/4, W/4
upconv3 = self.decoder.upconv3(daspp_feat) # H/4
upconv3 = self.decoder.bn3(upconv3) # B, 128, H/4, W/4
concat3 = torch.cat([upconv3, skip1, depth_8x8_scaled_ds], dim=1)
iconv3 = self.decoder.conv3(concat3) # B, 128, H/4, W/4
reduc4x4 = self.decoder.reduc4x4(iconv3) # B, 4, H/4, W/4
plane_normal_4x4 = reduc4x4[:, :3, :, :]
plane_normal_4x4 = F.normalize(plane_normal_4x4, 2, 1)
plane_dist_4x4 = reduc4x4[:, 3, :, :]
plane_eq_4x4 = torch.cat([plane_normal_4x4, plane_dist_4x4.unsqueeze(1)], 1)
depth_4x4 = self.decoder.lpg4x4(plane_eq_4x4, focal)
depth_4x4_scaled = depth_4x4.unsqueeze(1) / self.decoder.max_depth
depth_4x4_scaled_ds = F.interpolate(depth_4x4_scaled, scale_factor=0.5, mode='nearest')
upconv2 = self.decoder.upconv2(iconv3) # H/2
upconv2 = self.decoder.bn2(upconv2)
concat2 = torch.cat([upconv2, skip0, depth_4x4_scaled_ds], dim=1)
iconv2 = self.decoder.conv2(concat2) # B, 64, H/2, W/2
reduc2x2 = self.decoder.reduc2x2(iconv2) # B, 4, H/2, W/2
plane_normal_2x2 = reduc2x2[:, :3, :, :]
plane_normal_2x2 = F.normalize(plane_normal_2x2, 2, 1)
plane_dist_2x2 = reduc2x2[:, 3, :, :]
plane_eq_2x2 = torch.cat([plane_normal_2x2, plane_dist_2x2.unsqueeze(1)], 1)
depth_2x2 = self.decoder.lpg2x2(plane_eq_2x2, focal)
depth_2x2_scaled = depth_2x2.unsqueeze(1) / self.decoder.max_depth
upconv1 = self.decoder.upconv1(iconv2) # B, 32, H, W
reduc1x1 = self.decoder.reduc1x1(upconv1) # B, 1, H, W
concat1 = torch.cat([upconv1, reduc1x1, depth_2x2_scaled, depth_4x4_scaled, depth_8x8_scaled],
dim=1) # B, 36, H, W
iconv1 = self.decoder.conv1(concat1) # B, 32, H, W
final_depth = self.decoder.max_depth * self.decoder.get_depth(iconv1)
if self.dataset == 'kitti':
final_depth = final_depth * focal.view(-1, 1, 1, 1).float() / 715.0873
for layer in layer_list:
if layer == 'upconv3':
return_list.append(upconv3)
elif layer == 'iconv2':
return_list.append(iconv2)
elif layer == 'upconv2':
return_list.append(upconv2)
elif layer == 'iconv1':
return_list.append(iconv1)
elif layer == 'upconv1':
return_list.append(upconv1)
else:
raise NotImplementedError
return_list.append(final_depth)
return return_list
if is_training:
return_list = forward(x, layer_list)
else:
with torch.no_grad():
return_list = forward(x, layer_list)
return return_list
def get_baseline_loss(self, preds, depths):
if self.dataset == 'kitti':
mask = depths > 1.0
elif self.dataset == 'nyuv2':
mask = depths > 0.1
else:
raise NotImplementedError
variance_focus = 0.85
silog_criterion = silog_loss(variance_focus=variance_focus)
loss = silog_criterion.forward(preds, depths, mask.to(torch.bool))
return loss
class silog_loss(nn.Module):
def __init__(self, variance_focus):
super(silog_loss, self).__init__()
self.variance_focus = variance_focus
def forward(self, depth_est, depth_gt, mask):
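# Scale-invariant log (SILog) loss: with d = log(pred) - log(gt) over the valid mask,
# loss = sqrt(mean(d^2) - variance_focus * mean(d)^2) * 10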
d = torch.log(depth_est[mask]) - torch.log(depth_gt[mask])
return torch.sqrt((d ** 2).mean() - self.variance_focus * (d.mean() ** 2)) * 10.0
|
1648839
|
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Int8, Int16, Int32, Int64, Schema, String
UNKNOWN_OFFSET = -1
class OffsetResetStrategy(object):
LATEST = -1
EARLIEST = -2
NONE = 0
class OffsetResponse_v0(Response):
API_KEY = 2
API_VERSION = 0
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offsets', Array(Int64))))))
)
class OffsetResponse_v1(Response):
API_KEY = 2
API_VERSION = 1
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('timestamp', Int64),
('offset', Int64)))))
)
class OffsetResponse_v2(Response):
API_KEY = 2
API_VERSION = 2
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('timestamp', Int64),
('offset', Int64)))))
)
class OffsetResponse_v3(Response):
"""
on quota violation, brokers send out responses before throttling
"""
API_KEY = 2
API_VERSION = 3
SCHEMA = OffsetResponse_v2.SCHEMA
class OffsetResponse_v4(Response):
"""
Add leader_epoch to response
"""
API_KEY = 2
API_VERSION = 4
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('timestamp', Int64),
('offset', Int64),
('leader_epoch', Int32)))))
)
class OffsetResponse_v5(Response):
"""
adds a new error code, OFFSET_NOT_AVAILABLE
"""
API_KEY = 2
API_VERSION = 5
SCHEMA = OffsetResponse_v4.SCHEMA
class OffsetRequest_v0(Request):
API_KEY = 2
API_VERSION = 0
RESPONSE_TYPE = OffsetResponse_v0
SCHEMA = Schema(
('replica_id', Int32),
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('timestamp', Int64),
('max_offsets', Int32)))))
)
DEFAULTS = {
'replica_id': -1
}
class OffsetRequest_v1(Request):
API_KEY = 2
API_VERSION = 1
RESPONSE_TYPE = OffsetResponse_v1
SCHEMA = Schema(
('replica_id', Int32),
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('timestamp', Int64)))))
)
DEFAULTS = {
'replica_id': -1
}
class OffsetRequest_v2(Request):
API_KEY = 2
API_VERSION = 2
RESPONSE_TYPE = OffsetResponse_v2
SCHEMA = Schema(
('replica_id', Int32),
('isolation_level', Int8), # <- added isolation_level
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('timestamp', Int64)))))
)
DEFAULTS = {
'replica_id': -1
}
class OffsetRequest_v3(Request):
API_KEY = 2
API_VERSION = 3
RESPONSE_TYPE = OffsetResponse_v3
SCHEMA = OffsetRequest_v2.SCHEMA
DEFAULTS = {
'replica_id': -1
}
class OffsetRequest_v4(Request):
"""
Add current_leader_epoch to request
"""
API_KEY = 2
API_VERSION = 4
RESPONSE_TYPE = OffsetResponse_v4
SCHEMA = Schema(
('replica_id', Int32),
('isolation_level', Int8), # <- added isolation_level
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('current_leader_epoch', Int64),
('timestamp', Int64)))))
)
DEFAULTS = {
'replica_id': -1
}
class OffsetRequest_v5(Request):
API_KEY = 2
API_VERSION = 5
RESPONSE_TYPE = OffsetResponse_v5
SCHEMA = OffsetRequest_v4.SCHEMA
DEFAULTS = {
'replica_id': -1
}
OffsetRequest = [
OffsetRequest_v0, OffsetRequest_v1, OffsetRequest_v2,
OffsetRequest_v3, OffsetRequest_v4, OffsetRequest_v5,
]
OffsetResponse = [
OffsetResponse_v0, OffsetResponse_v1, OffsetResponse_v2,
OffsetResponse_v3, OffsetResponse_v4, OffsetResponse_v5,
]
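# Construction sketch (assumption: kafka-python Struct subclasses accept their schema
# fields as positional arguments): request the latest offset for partition 0 of 'my-topic'.
#   request = OffsetRequest_v1(-1, [('my-topic', [(0, OffsetResetStrategy.LATEST)])])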
|
1648841
|
import logging
import os
import png
logger = logging.getLogger('dump-asc-16')
font_file_path = 'assets/hzk-fonts/ASC16'
outputs_dir = 'outputs/png/asc/16/'
asc_glyph_bytes_length = 8 * 16 // 8
def _iter_ascii(font_file, num_start, num_stop):
count = 0
for num in range(num_start, num_stop):
c = chr(num)
uni_hex_name = f'{num:04X}'
asc_offset = num * asc_glyph_bytes_length
font_file.seek(asc_offset)
glyph_bytes = font_file.read(asc_glyph_bytes_length)
bitmap = []
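# Each of the 16 rows of a glyph is one byte, most significant bit = leftmost pixel.
# Every bit becomes an RGBA pixel: RGB fixed at 0 (black) with alpha 255 for set bits
# and alpha 0 for unset bits, so the glyph renders as black on a transparent background.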
for row_index in range(16):
row = []
byte = glyph_bytes[row_index]
for bit_index in range(8):
row.append(0)
row.append(0)
row.append(0)
if 0b1 << (7 - bit_index) & byte:
row.append(255)
else:
row.append(0)
bitmap.append(row)
image = png.from_array(bitmap, 'RGBA')
image.save(f'{outputs_dir}{uni_hex_name}.png')
logger.info(f'* generated png 8*16px {c if c.isprintable() else " "} - {uni_hex_name}')
count += 1
return count
def run():
if not os.path.exists(outputs_dir):
os.makedirs(outputs_dir)
with open(font_file_path, 'rb') as font_file:
logger.info('----> dump asc 16')
assert _iter_ascii(font_file, 0, 256) == 256
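if __name__ == '__main__':
    # Minimal entry-point sketch (assumption: this module is normally driven by a separate
    # runner script); requires the ASC16 font file to exist at `font_file_path`.
    logging.basicConfig(level=logging.INFO)
    run()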
|
1648849
|
from rapid_response_kit.utils.clients import twilio, pusher_connect
from rapid_response_kit.utils.helpers import (
parse_numbers,
echo_twimlet,
twilio_numbers,
check_is_valid_url
)
from clint.textui import colored
from twilio.twiml import Response
from pusher import Pusher
from flask import render_template, request, flash, redirect
def install(app):
if pusher_connect(app.config):
app.config.apps.register('noticeboard', 'Noticeboard', '/noticeboard')
else:
print(colored.red(
'''
Noticeboard requires Pusher credentials.
Please add PUSHER_APP_ID, PUSHER_KEY and PUSHER_SECRET
to rapid_response_kit/utils/config.py'''))
return
@app.route('/noticeboard', methods=['GET'])
def show_noticeboard():
numbers = twilio_numbers()
client = twilio()
# Build a list of numbers that are being used for Noticeboard
noticeboard_numbers = []
for p in client.phone_numbers.list():
if '[RRKit] Noticeboard' in p.friendly_name:
noticeboard_numbers.append(p.phone_number)
return render_template(
"noticeboard.html",
url='{0}/live'.format(request.base_url),
numbers=numbers,
noticeboards=noticeboard_numbers
)
@app.route('/noticeboard', methods=['POST'])
def do_noticeboard():
client = twilio()
url = "{0}noticeboard/post".format(request.url_root)
client.phone_numbers.update(request.form['twilio_number'],
sms_url=url,
sms_method='POST',
friendly_name='[RRKit] Noticeboard')
from_number = client.phone_numbers.get(request.form['twilio_number'])
live_url = '{0}noticeboard/live/{1}'.format(
request.url_root,
from_number.phone_number
)
numbers = parse_numbers(request.form['numbers'])
body = request.form.get('message', '').replace('{URL}', live_url)
media = check_is_valid_url(request.form.get('media', ''))
for num in numbers:
client.messages.create(
to=num,
from_=from_number.phone_number,
body=body,
media_url=media
)
flash('Sent {0} the message'.format(num), 'success')
return redirect('/noticeboard')
@app.route('/noticeboard/post', methods=['POST'])
def handle_noticeboard_inbound():
pusher_key = app.config.get('PUSHER_KEY', None)
pusher_secret = app.config.get('PUSHER_SECRET', None)
pusher_app_id = app.config.get('PUSHER_APP_ID', None)
try:
p = Pusher(pusher_app_id, pusher_key, pusher_secret)
p['rrk_noticeboard_live'].trigger(
'new_message',
{
'image': request.values.get('MediaUrl0', None),
'body': request.values.get('Body', None),
'from': request.values.get('From', None)
}
)
except Exception:
return '<Response />'
to = request.values.get('To', '')
r = Response()
r.message(
'''Thank you, your image has been posted
to {0}noticeboard/live/{1}'''.format(request.url_root, to))
return r.toxml()
@app.route('/noticeboard/live/<number>', methods=['GET'])
def show_noticeboard_live(number=None):
pusher_key = app.config.get('PUSHER_KEY', '')
twilio_client = twilio()
try:
cleaned_number = number
except Exception:
flash('We did not receive a correct number', 'danger')
return redirect('/noticeboard')
# Build a list of messages to our number that have media attached
msgs = []
for m in twilio_client.messages.list(to=cleaned_number):
if int(m.num_media) > 0:
msgs.append(m)
'''
Super janky because media is separate from message resources.
Let's mash the bits we want together and then add them to a list
- <NAME>
'''
msg_media_list = []
for m in msgs:
d = {}
d['image_url'] = twilio_client.media(m.sid).list()[0].uri
d['body'] = m.body
d['from'] = m.from_
msg_media_list.append(d)
return render_template(
'noticeboard_live.html',
pusher_key=pusher_key,
messages=msg_media_list,
number=number
)
|
1648855
|
import unittest
import numpy as np
import bayesnet as bn
class TestProduct(unittest.TestCase):
def test_product(self):
arrays = [
1,
np.arange(1, 5),
np.arange(1, 7).reshape(2, 3),
np.arange(1, 7).reshape(2, 3, 1)
]
axes = [
None,
None,
1,
(0, 2)
]
keepdims = [
False,
False,
True,
False
]
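# Expected gradients of the product: d/dx_i prod(x) = prod(x) / x_i over the reduced axes.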
grads = [
1,
np.array([24., 12., 8., 6.]),
np.array([
[6., 3., 2.],
[30., 24., 20.]
]),
np.array([4., 5., 6., 1., 2., 3.]).reshape(2, 3, 1)
]
for arr, ax, keep, g in zip(arrays, axes, keepdims, grads):
a = bn.Parameter(arr)
b = a.prod(ax, keep)
b.backward(np.ones(b.shape))
if isinstance(g, int):
self.assertEqual(g, a.grad)
else:
self.assertTrue((g == a.grad).all())
if __name__ == '__main__':
unittest.main()
|
1648864
|
from neuralogic import get_neuralogic, get_gateway
from neuralogic.core.settings import SettingsProxy
from typing import List
class Sources:
@staticmethod
def from_settings(settings: SettingsProxy) -> "Sources":
neuralogic = get_neuralogic()
sources = neuralogic.cz.cvut.fel.ida.setup.Sources(settings.settings)
return Sources(sources)
@staticmethod
def from_args(args: List[str], settings: SettingsProxy) -> "Sources":
neuralogic = get_neuralogic()
gateway = get_gateway()
jargs = gateway.new_array(gateway.jvm.java.lang.String, len(args))
for i, item in enumerate(args):
jargs[i] = item
sources = neuralogic.cz.cvut.fel.ida.neuralogic.cli.utils.Runner.getSources(jargs, settings.settings)
return Sources(sources)
def __init__(self, sources):
self.sources = sources
def to_json(self) -> str:
return self.sources.exportToJson()
|
1648867
|
from KratosMultiphysics import *
from KratosMultiphysics.FluidDynamicsApplication import *
import KratosMultiphysics.KratosUnittest as KratosUnittest
import random
class AdjointVMSElement2D(KratosUnittest.TestCase):
def setUp(self):
self.delta_time = 1.0
# create test model part
self.model = Model()
self.model_part = self.model.CreateModelPart("test")
self.model_part.AddNodalSolutionStepVariable(VELOCITY)
self.model_part.AddNodalSolutionStepVariable(ACCELERATION)
self.model_part.AddNodalSolutionStepVariable(MESH_VELOCITY)
self.model_part.AddNodalSolutionStepVariable(PRESSURE)
self.model_part.AddNodalSolutionStepVariable(VISCOSITY)
self.model_part.AddNodalSolutionStepVariable(DENSITY)
self.model_part.AddNodalSolutionStepVariable(BODY_FORCE)
self.model_part.CreateNewNode(1, 0.0, 0.0, 0.0)
self.model_part.CreateNewNode(2, 1.0, 0.0, 0.0)
self.model_part.CreateNewNode(3, 1.0, 1.0, 0.0)
prop = self.model_part.GetProperties()[0]
self.model_part.CreateNewElement("VMS2D3N", 1, [1, 2, 3], prop)
self.model_part.CreateNewElement("VMSAdjointElement2D", 2, [1, 2, 3], prop)
self.model_part.SetBufferSize(2)
self.model_part.ProcessInfo[OSS_SWITCH] = 0
self.model_part.ProcessInfo[DELTA_TIME] = self.delta_time
self.model_part.ProcessInfo[DYNAMIC_TAU] = 1.0
self.vms_element = self.model_part.GetElement(1)
self.adjoint_element = self.model_part.GetElement(2)
self._AssignSolutionStepData1(0)
self._AssignSolutionStepData2(1)
def _AssignSolutionStepData1(self, step=0):
# generate nodal solution step test data
random.seed(1.0)
for node in self.model_part.Nodes:
node.SetSolutionStepValue(DENSITY,step,1.0)
node.SetSolutionStepValue(VISCOSITY,step,1.0e-5)
node.SetSolutionStepValue(VELOCITY_X,step,random.random())
node.SetSolutionStepValue(VELOCITY_Y,step,random.random())
node.SetSolutionStepValue(ACCELERATION_X,step,random.random())
node.SetSolutionStepValue(ACCELERATION_Y,step,random.random())
node.SetSolutionStepValue(PRESSURE,step,random.random())
def _AssignSolutionStepData2(self, step=0):
# generate nodal solution step test data
random.seed(2.0)
for node in self.model_part.Nodes:
node.SetSolutionStepValue(DENSITY,step,1.0)
node.SetSolutionStepValue(VISCOSITY,step,1.0e-5)
node.SetSolutionStepValue(VELOCITY_X,step,random.random())
node.SetSolutionStepValue(VELOCITY_Y,step,random.random())
node.SetSolutionStepValue(ACCELERATION_X,step,random.random())
node.SetSolutionStepValue(ACCELERATION_Y,step,random.random())
node.SetSolutionStepValue(PRESSURE,step,random.random())
def _zeroVector(self,size):
v = Vector(size)
for i in range(size):
v[i] = 0.0
return v
def _transpose(self, m):
tmp = Matrix(m.Size1(), m.Size2())
for i in range(m.Size1()):
for j in range(m.Size2()):
tmp[i,j] = m[j,i]
return tmp
def _assertMatrixAlmostEqual(self, matrix1, matrix2, prec=7):
self.assertEqual(matrix1.Size1(), matrix2.Size1())
self.assertEqual(matrix1.Size2(), matrix2.Size2())
for i in range(matrix1.Size1()):
for j in range(matrix1.Size2()):
self.assertAlmostEqual(matrix1[i,j], matrix2[i,j], prec)
def testCalculateSecondDerivativesLHS(self):
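# The adjoint element's second-derivatives LHS should equal the negative transpose of the
# primal VMS mass matrix, so compare Mass1 against -transpose(mass2_trans).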
Mass1 = Matrix(9,9)
self.model_part.ProcessInfo[DELTA_TIME] = self.delta_time
self.vms_element.CalculateMassMatrix(Mass1,self.model_part.ProcessInfo)
self.model_part.ProcessInfo[DELTA_TIME] =-self.delta_time
mass2_trans = Matrix(9,9)
self.adjoint_element.CalculateSecondDerivativesLHS(mass2_trans,self.model_part.ProcessInfo)
self._assertMatrixAlmostEqual(Mass1, self._transpose(mass2_trans)*(-1.0))
def testCalculateFirstDerivativesLHS1(self):
# test for steady state.
for node in self.model_part.Nodes:
for step in range(2):
node.SetSolutionStepValue(ACCELERATION_X, step, 0.0)
node.SetSolutionStepValue(ACCELERATION_Y, step, 0.0)
# unperturbed residual
LHS = Matrix(9,9)
RHS = self._zeroVector(9)
FirstDerivatives = Vector(9)
self.model_part.ProcessInfo[DELTA_TIME] = self.delta_time
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
res0 = LHS * FirstDerivatives
# finite difference approximation
h = 0.0000001
FDAdjointMatrix = Matrix(9,9)
row_index = 0
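# Each row of the finite-difference adjoint matrix is -(res(u+h) - res(u0)) / h, i.e. a
# forward-difference estimate of the negative residual derivative with respect to each
# nodal dof (VELOCITY_X, VELOCITY_Y, PRESSURE per node).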
for node in self.model_part.Nodes:
# VELOCITY_X
vx = node.GetSolutionStepValue(VELOCITY_X,0)
node.SetSolutionStepValue(VELOCITY_X,0,vx+h)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
node.SetSolutionStepValue(VELOCITY_X,0,vx)
res = LHS * FirstDerivatives
for j in range(9):
FDAdjointMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# VELOCITY_Y
vy = node.GetSolutionStepValue(VELOCITY_Y,0)
node.SetSolutionStepValue(VELOCITY_Y,0,vy+h)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
node.SetSolutionStepValue(VELOCITY_Y,0,vy)
res = LHS * FirstDerivatives
for j in range(9):
FDAdjointMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# PRESSURE
p = node.GetSolutionStepValue(PRESSURE,0)
node.SetSolutionStepValue(PRESSURE,0,p+h)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
node.SetSolutionStepValue(PRESSURE,0,p)
res = LHS * FirstDerivatives
for j in range(9):
FDAdjointMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# analytical implementation
self.model_part.ProcessInfo[DELTA_TIME] =-self.delta_time
AdjointMatrix = Matrix(9,9)
self.adjoint_element.CalculateFirstDerivativesLHS(AdjointMatrix,self.model_part.ProcessInfo)
self._assertMatrixAlmostEqual(FDAdjointMatrix, AdjointMatrix)
# reset test data
self._AssignSolutionStepData1(0)
self._AssignSolutionStepData2(1)
def testCalculateFirstDerivativesLHS2(self):
# unperturbed residual
Mass = Matrix(9,9)
LHS = Matrix(9,9)
RHS = self._zeroVector(9)
FirstDerivatives = Vector(9)
SecondDerivatives = Vector(9)
self.model_part.ProcessInfo[DELTA_TIME] = self.delta_time
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
self.vms_element.GetSecondDerivativesVector(SecondDerivatives,0)
res0 = LHS * FirstDerivatives + Mass * SecondDerivatives
# finite difference approximation
h = 0.0000001
FDAdjointMatrix = Matrix(9,9)
row_index = 0
for node in self.model_part.Nodes:
# VELOCITY_X
vx = node.GetSolutionStepValue(VELOCITY_X,0)
node.SetSolutionStepValue(VELOCITY_X,0,vx+h)
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
self.vms_element.GetSecondDerivativesVector(SecondDerivatives,0)
node.SetSolutionStepValue(VELOCITY_X,0,vx)
res = LHS * FirstDerivatives + Mass * SecondDerivatives
for j in range(9):
FDAdjointMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# VELOCITY_Y
vy = node.GetSolutionStepValue(VELOCITY_Y,0)
node.SetSolutionStepValue(VELOCITY_Y,0,vy+h)
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
self.vms_element.GetSecondDerivativesVector(SecondDerivatives,0)
node.SetSolutionStepValue(VELOCITY_Y,0,vy)
res = LHS * FirstDerivatives + Mass * SecondDerivatives
for j in range(9):
FDAdjointMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# PRESSURE
p = node.GetSolutionStepValue(PRESSURE,0)
node.SetSolutionStepValue(PRESSURE,0,p+h)
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
self.vms_element.GetSecondDerivativesVector(SecondDerivatives,0)
node.SetSolutionStepValue(PRESSURE,0,p)
res = LHS * FirstDerivatives + Mass * SecondDerivatives
for j in range(9):
FDAdjointMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# analytical implementation
        self.model_part.ProcessInfo[DELTA_TIME] = -self.delta_time
AdjointMatrix = Matrix(9,9)
self.adjoint_element.CalculateFirstDerivativesLHS(AdjointMatrix,self.model_part.ProcessInfo)
self._assertMatrixAlmostEqual(FDAdjointMatrix, AdjointMatrix)
def testCalculateSensitivityMatrix(self):
# unperturbed residual
Mass = Matrix(9,9)
LHS = Matrix(9,9)
RHS = self._zeroVector(9)
FirstDerivatives = Vector(9)
SecondDerivatives = Vector(9)
self.model_part.ProcessInfo[DELTA_TIME] = self.delta_time
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
self.vms_element.GetFirstDerivativesVector(FirstDerivatives,0)
self.vms_element.GetSecondDerivativesVector(SecondDerivatives,0)
res0 = LHS * FirstDerivatives + Mass * SecondDerivatives
# finite difference approximation
h = 0.00000001
FDShapeDerivativeMatrix = Matrix(6,9)
row_index = 0
for node in self.model_part.Nodes:
# X
x = node.X
node.X = x+h
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
node.X = x
res = LHS * FirstDerivatives + Mass * SecondDerivatives
for j in range(9):
FDShapeDerivativeMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# Y
y = node.Y
node.Y = y+h
self.vms_element.CalculateMassMatrix(Mass,self.model_part.ProcessInfo)
self.vms_element.CalculateLocalVelocityContribution(LHS,RHS,self.model_part.ProcessInfo)
node.Y = y
res = LHS * FirstDerivatives + Mass * SecondDerivatives
for j in range(9):
FDShapeDerivativeMatrix[row_index,j] = -(res[j] - res0[j]) / h
row_index = row_index + 1
# analytical implementation
        self.model_part.ProcessInfo[DELTA_TIME] = -self.delta_time
ShapeDerivativeMatrix = Matrix(6,9)
self.adjoint_element.CalculateSensitivityMatrix(SHAPE_SENSITIVITY,ShapeDerivativeMatrix,self.model_part.ProcessInfo)
self._assertMatrixAlmostEqual(FDShapeDerivativeMatrix, ShapeDerivativeMatrix)
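# --- Generic finite-difference check (illustrative sketch, NumPy only) ---
# The tests above compare each analytically assembled adjoint matrix against
# a forward-difference approximation whose row j is
#     -(res(u + h*e_j) - res(u)) / h
# `residual` below is a hypothetical stand-in for the element residual,
# not a Kratos API.
import numpy as np

def fd_adjoint(residual, u, h=1e-7):
    res0 = residual(u)
    rows = []
    for j in range(len(u)):
        up = np.array(u, dtype=float)
        up[j] += h  # perturb one degree of freedom
        rows.append(-(residual(up) - res0) / h)
    return np.vstack(rows)  # row j = derivative w.r.t. u[j]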
if __name__ == '__main__':
KratosUnittest.main()
|
1648889
|
from specter import Spec, expect
from alchemize import Attr, JsonMappedModel
from alchemize.mapping import get_key_paths, get_normalized_map
class TestModel(JsonMappedModel):
__mapping__ = {
'thing': Attr('thing', str),
'old_style': ['thing', str],
}
class SampleMapping(JsonMappedModel):
__mapping__ = {
'top_lvl': Attr('top_lvl', str),
'model': Attr('model', TestModel),
'model_list': Attr('model_list', [TestModel]),
}
class TestMapping(Spec):
def can_get_key_paths(self):
key_list = get_key_paths(SampleMapping)
expect('/top_lvl').to.be_in(key_list)
expect('/model/thing').to.be_in(key_list)
expect('/model/old_style').to.be_in(key_list)
expect('/model_list/thing').to.be_in(key_list)
    def can_handle_model_being_none(self):
ret = get_normalized_map(None)
expect(ret).to.equal({})
|
1648890
|
from plugins.base import Base
from utils import fmt
import gevent
class Welcome(Base):
def on_member_join(self, guild, member):
welcome_message = fmt(guild.storage.get('welcome_message'),
server=guild.name,
user=member.mention)
announcement_channel = guild.storage.get('channel_name')
private = guild.storage.get('private')
destination = announcement_channel or guild.id
if private:
destination = member.id
self.send_message(destination, welcome_message)
def on_member_remove(self, guild, member):
gb_message = guild.storage.get('gb_message')
if guild.storage.get('gb_disabled') or not gb_message:
return
gb_message = fmt(gb_message,
server=guild.name,
user=member.name)
channel = guild.storage.get('channel_name')
destination = channel or guild.id
self.send_message(destination, gb_message)
|
1648895
|
import logging
import click
from regparser.diff.tree import changes_between
from regparser.index import dependency, entry
logger = logging.getLogger(__name__)
@click.command()
@click.argument('cfr_title', type=int)
@click.argument('cfr_part', type=int)
def diffs(cfr_title, cfr_part):
"""Construct diffs between known trees."""
logger.info("Build diffs - %s Part %s", cfr_title, cfr_part)
tree_dir = entry.FrozenTree(cfr_title, cfr_part)
diff_dir = entry.Diff(cfr_title, cfr_part)
pairs = [(l.path[-1], r.path[-1])
for l in tree_dir.sub_entries()
for r in tree_dir.sub_entries()]
deps = dependency.Graph()
for lhs_id, rhs_id in pairs:
deps.add(diff_dir / lhs_id / rhs_id, tree_dir / lhs_id)
deps.add(diff_dir / lhs_id / rhs_id, tree_dir / rhs_id)
trees = {}
for lhs_id, rhs_id in pairs:
path = diff_dir / lhs_id / rhs_id
deps.validate_for(path)
if deps.is_stale(path):
if lhs_id not in trees:
trees[lhs_id] = (tree_dir / lhs_id).read()
if rhs_id not in trees:
trees[rhs_id] = (tree_dir / rhs_id).read()
path.write(dict(changes_between(trees[lhs_id], trees[rhs_id])))
|
1648896
|
import numpy as np
import pickle
class CorrelationList:
def __init__(self, shape):
self._sumx = np.zeros(shape, dtype=float)
self._sumy = np.zeros(shape, dtype=float)
self._sumxy = np.zeros(shape, dtype=float)
self._sumxsq = np.zeros(shape, dtype=float)
self._sumysq = np.zeros(shape, dtype=float)
self._n = np.zeros(shape, dtype=float) # TODO: Since this should be the same for every point, we can maybe use a single point for it
def __getitem__(self, key):
if isinstance(key, int) or isinstance(key, tuple) or isinstance(key, list):
num = self._sumxy[key] - (self._sumx[key] * self._sumy[key] / self._n[key])
denom1 = self._sumxsq[key] - (self._sumx[key]**2 / self._n[key])
denom2 = self._sumysq[key] - (self._sumy[key]**2 / self._n[key])
corr = num / np.maximum(np.sqrt(denom1 * denom2), 1e-15)
return corr
if isinstance(key, slice):
raise NotImplementedError
else:
raise TypeError
def update(self, key, x, y):
self._sumx[key] += np.sum(x)
self._sumy[key] += np.sum(y)
self._sumxy[key] += np.dot(x, y)
self._sumxsq[key] += np.sum(np.square(x))
self._sumysq[key] += np.sum(np.square(y))
self._n[key] += len(x)
def merge(self, correlation_array):
if isinstance(correlation_array, CorrelationList):
self._sumx += correlation_array._sumx
self._sumy += correlation_array._sumy
self._sumxy += correlation_array._sumxy
self._sumxsq += correlation_array._sumxsq
self._sumysq += correlation_array._sumysq
self._n += correlation_array._n
else:
raise TypeError
def save(self):
pickle.dump(self, open("/tmp/correlations.p", "wb"))
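# --- Usage sketch (illustrative; the names below are hypothetical) ---
# CorrelationList keeps running sums, so the Pearson correlation at each
# index can be updated incrementally as (x, y) batches stream in.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    cl = CorrelationList((2,))
    for _ in range(10):
        xs = rng.normal(size=100)
        cl.update(0, xs, 2.0 * xs)              # perfectly correlated pair
        cl.update(1, xs, rng.normal(size=100))  # unrelated pair
    print(cl[0], cl[1])  # expect roughly 1.0 and 0.0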
|
1648966
|
from haystack import indexes
from regcore import models
class DocumentIndex(indexes.Indexable, indexes.SearchIndex):
"""Search index used by Haystack"""
doc_type = indexes.CharField(model_attr='doc_type')
version = indexes.CharField(model_attr='version', null=True)
label_string = indexes.CharField(model_attr='label_string')
is_root = indexes.BooleanField(model_attr='root')
is_subpart = indexes.BooleanField()
title = indexes.MultiValueField()
regulation = indexes.CharField(model_attr='label_string')
text = indexes.CharField(document=True, use_template=True)
def prepare_regulation(self, obj):
return obj.label_string.split('-')[0]
def prepare_is_subpart(self, obj):
return (
'Subpart' in obj.label_string or
'Subjgrp' in obj.label_string
)
def prepare_title(self, obj):
"""For compatibility reasons, we make this a singleton list"""
if obj.title:
return [obj.title]
else:
return []
def get_model(self):
return models.Document
|
1648969
|
from passlib.hash import sha512_crypt
s = "penguins"
for ip in range(1000,2000):
sp = str(ip)
p = sp[1:]
h = sha512_crypt.using(salt=s, rounds=5000).hash(p)
if h[12:16] == "PcSL":
        print(p, h)
|
1649014
|
from pypika import (
Parameter,
PostgreSQLQuery,
Table,
terms,
Dialects,
)
from pypika.terms import Node
from pypika.utils import format_quotes
from .base import Database
class DateTrunc(terms.Function):
"""
Wrapper for the PostgreSQL date_trunc function
"""
def __init__(self, field, date_format, alias=None):
super(DateTrunc, self).__init__('DATE_TRUNC', date_format, field, alias=alias)
class PostgreSQLTimestamp(Node):
def __init__(self, timestamp):
self.timestamp = timestamp
def get_sql(self, secondary_quote_char: str = "'", **kwargs) -> str:
formatted_timestamp = format_quotes(self.timestamp.isoformat(), secondary_quote_char)
return f"TIMESTAMP {formatted_timestamp}"
class PostgresDateAdd(terms.Function):
def __init__(self, field, date_part, interval):
wrapped_field = self.wrap_constant(field, PostgreSQLTimestamp)
if date_part == "quarter":
date_part = "month"
interval *= 3
interval_term = terms.Interval(**{f'{str(date_part)}s': interval, 'dialect': Dialects.POSTGRESQL})
super().__init__('DATEADD', wrapped_field, interval_term)
def get_function_sql(self, **kwargs):
return " + ".join(self.get_arg_sql(arg, **kwargs) for arg in self.args)
class PostgreSQLDatabase(Database):
"""
PostgreSQL client that uses the psycopg module.
"""
# The pypika query class to use for constructing queries
query_cls = PostgreSQLQuery
def __init__(self, host="localhost", port=5432, database=None, user=None, password=None, **kwargs):
super().__init__(host, port, database, **kwargs)
self.user = user
self.password = password
def connect(self):
import psycopg2
return psycopg2.connect(
host=self.host,
port=self.port,
dbname=self.database,
user=self.user,
password=self.password,
)
def trunc_date(self, field, interval):
return DateTrunc(field, str(interval))
def date_add(self, field, date_part, interval):
return PostgresDateAdd(field, date_part, interval)
def get_column_definitions(self, schema, table, connection=None):
columns = Table("columns", schema="information_schema")
columns_query = (
PostgreSQLQuery.from_(columns, immutable=False)
.select(columns.column_name, columns.data_type)
.where(columns.table_schema == Parameter('%(schema)s'))
.where(columns.field("table_name") == Parameter('%(table)s'))
.distinct()
.orderby(columns.column_name)
)
return self.fetch(str(columns_query), parameters=dict(schema=schema, table=table), connection=connection)
|
1649047
|
memory = {}
"""
The N'th Fibonacci number is
F(n) = F(n-1) + F(n-2)
with F(0) = 1, F(1) = 1
"""
def fib(n):
global memory
if n not in memory:
if n >= 2:
memory[n] = fib(n-2) + fib(n-1)
else:
memory[n] = 1
return memory[n]
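# --- Usage sketch (illustrative) ---
# The `memory` cache means each value is computed once, so fib(n) runs in
# O(n) rather than the exponential time of the naive recursion.
if __name__ == "__main__":
    print([fib(n) for n in range(10)])  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]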
|
1649067
|
from typing import Any
from gogettr.api import ApiClient
class Capability:
"""Provides base functionality for the individual capabilities."""
def __init__(self, client: ApiClient):
self.client = client
def pull(self, *args, **kwargs) -> Any:
"""Pull the desired data from GETTR."""
raise NotImplementedError
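# --- Subclass sketch (hypothetical, for illustration only) ---
# Concrete capabilities are expected to override `pull` and use
# `self.client` to reach the API; the class name and endpoint below are
# made up and not part of gogettr.
class ExampleCapability(Capability):
    def pull(self, user_id: str) -> Any:
        # `ApiClient.get` and this endpoint path are assumptions
        return self.client.get(f"/s/uinf/{user_id}")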
|
1649077
|
from django.urls import path, include
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns = [
path('api/', include('api.urls'))
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static('/media/', document_root=settings.MEDIA_ROOT)
|
1649082
|
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from PIL import Image
import os
import random
class TumorDataset(Dataset):
""" Returns a TumorDataset class object which represents our tumor dataset.
TumorDataset inherits from torch.utils.data.Dataset class.
"""
def __init__(self, root_dir, transform=True, DEBUG=False):
""" Constructor for our TumorDataset class.
Parameters:
root_dir(str): Directory with all the images.
transform(bool): Flag to apply image random transformation.
DEBUG(bool): To switch to debug mode for image transformation.
Returns: None
"""
self.root_dir = root_dir
self.transform = {'hflip': TF.hflip,
'vflip': TF.vflip,
'rotate': TF.rotate}
self.default_transformation = transforms.Compose([
transforms.Grayscale(),
transforms.Resize((512, 512))
])
self.DEBUG = DEBUG
if not transform:
self.transform = None
def __getitem__(self, index):
""" Overridden method from inheritted class to support
indexing of dataset such that datset[I] can be used
to get Ith sample.
Parameters:
index(int): Index of the dataset sample
Return:
sample(dict): Contains the index, image, mask torch.Tensor.
'index': Index of the image.
'image': Contains the tumor image torch.Tensor.
'mask' : Contains the mask image torch.Tensor.
"""
image_name = os.path.join(self.root_dir, str(index)+'.png')
mask_name = os.path.join(self.root_dir, str(index)+'_mask.png')
image = Image.open(image_name)
mask = Image.open(mask_name)
image = self.default_transformation(image)
mask = self.default_transformation(mask)
# Custom transformations
if self.transform:
image, mask = self._random_transform(image, mask)
image = TF.to_tensor(image)
mask = TF.to_tensor(mask)
sample = {'index': int(index), 'image': image, 'mask': mask}
return sample
def _random_transform(self, image, mask):
""" Applies a set of transformation in random order.
Each transformation has a probability of 0.5
"""
choice_list = list(self.transform)
for _ in range(len(choice_list)):
choice_key = random.choice(choice_list)
if self.DEBUG:
print(f'Transform choose: {choice_key}')
            action_prob = random.random()
            if action_prob >= 0.5:
if self.DEBUG:
print(f'\tApplying transformation: {choice_key}')
if choice_key == 'rotate':
rotation = random.randint(15, 75)
if self.DEBUG:
print(f'\t\tRotation by: {rotation}')
image = self.transform[choice_key](image, rotation)
mask = self.transform[choice_key](mask, rotation)
else:
image = self.transform[choice_key](image)
mask = self.transform[choice_key](mask)
choice_list.remove(choice_key)
return image, mask
def __len__(self):
""" Overridden method from inheritted class so that
len(self) returns the size of the dataset.
"""
error_msg = 'Part of dataset is missing!\nNumber of tumor and mask images are not same.'
total_files = len(os.listdir(self.root_dir))
assert (total_files % 2 == 0), error_msg
return total_files//2
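# --- Usage sketch (illustrative; the directory path is hypothetical) ---
# Each sample pairs `<index>.png` with `<index>_mask.png` under root_dir:
#     dataset = TumorDataset('./png_dataset', transform=True)
#     sample = dataset[0]
#     sample['image'].shape  # torch.Size([1, 512, 512]) after Grayscale+Resize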
|
1649084
|
import numpy as np
import pandas as pd
import pytest
from activitysim.abm.models.util.trip import get_time_windows
@pytest.mark.parametrize("duration, levels, expected",
[(24, 3, 2925), (24, 2, 325), (24, 1, 25),
(48, 3, 20825), (48, 2, 1225), (48, 1, 49)])
def test_get_time_windows(duration, levels, expected):
time_windows = get_time_windows(duration, levels)
if levels == 1:
assert time_windows.ndim == 1
assert len(time_windows) == expected
assert np.sum(time_windows <= duration) == expected
else:
assert len(time_windows) == levels
assert len(time_windows[0]) == expected
total_duration = np.sum(time_windows, axis=0)
assert np.sum(total_duration <= duration) == expected
df = pd.DataFrame(np.transpose(time_windows))
assert len(df) == len(df.drop_duplicates())
|
1649098
|
from typing import Any, Optional
class ImportErrorMixin:
"""
Mixin class which always raises :class:`ImportError`.
Subclasses can modify the message by overriding `__import_error_message__`.
Parameters
----------
args
Ignored.
kwargs
Ignored.
Raises
------
ImportError
Always.
"""
__import_error_message__ = "Unable to import the class."
def __init__(self, *args: Any, **kwargs: Any):
raise ImportError(self.__import_error_message__) from self.__error__
def __init_subclass__(cls, error: Optional[Exception] = None, **kwargs: Any):
super().__init_subclass__(**kwargs)
cls.__error__ = error
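# --- Usage sketch (illustrative; `optional_pkg` is a hypothetical package) ---
# Typical pattern: substitute the mixin for a class from an optional
# dependency, so the original ImportError only surfaces on instantiation.
try:
    from optional_pkg import FancyBackend
except ImportError as exc:
    class FancyBackend(ImportErrorMixin, error=exc):
        __import_error_message__ = "Install optional_pkg to use FancyBackend."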
|
1649099
|
from django.conf.urls import url
from jekyllnow.views import (
JekyllNowTheme
)
urlpatterns = [
url(r'^jn-init/$', JekyllNowTheme.as_view(), name='jn-init'),
]
|
1649105
|
class StreamingAPIError(Exception):
def __init__(self, code: int, message: str):
self.code = code
self.text = message
self.message = f"[{code}] {message}"
super().__init__(self.message)
|
1649119
|
import spacy
# Load the ja_core_news_sm model
nlp = spacy.load("ja_core_news_sm")
# Print the names of the pipeline components
print(nlp.pipe_names)
# Print the pipeline as a list of (name, component) tuples
print(nlp.pipeline)
|
1649140
|
from .rman_translator import RmanTranslator
from ..rman_sg_nodes.rman_sg_runprogram import RmanSgRunProgram
class RmanRunProgramTranslator(RmanTranslator):
def __init__(self, rman_scene):
super().__init__(rman_scene)
self.bl_type = 'PROCEDURAL_RUN_PROGRAM'
def export(self, ob, db_name):
sg_node = self.rman_scene.sg_scene.CreateProcedural(db_name)
sg_node.Define(self.rman_scene.rman.Tokens.Rix.k_RunProgram, None)
rman_sg_runprogram = RmanSgRunProgram(self.rman_scene, sg_node, db_name)
return rman_sg_runprogram
def export_deform_sample(self, rman_sg_runprogram, ob, time_sample):
pass
def update(self, ob, rman_sg_runprogram):
rm = ob.renderman
path_runprogram = rm.path_runprogram
        bounds = (-100000, 100000, -100000, 100000, -100000, 100000)
        primvar = rman_sg_runprogram.sg_node.GetPrimVars()
        primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_filename, path_runprogram)
primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_data, rm.runprogram_args)
primvar.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_bound, bounds, 6)
rman_sg_runprogram.sg_node.SetPrimVars(primvar)
|
1649181
|
import django_filters
from django.db.models import Q
from django.forms.widgets import CheckboxSelectMultiple
from api.models import LauncherConfig, Launch, Location, Pad
class LaunchListFilter(django_filters.FilterSet):
search = django_filters.CharFilter(field_name='search', label="Search", method='filter_by_all_name_fields')
location = django_filters.ModelChoiceFilter(field_name='pad__location',
queryset=Location.objects.all(),
label="Location")
start_date = django_filters.DateFilter(field_name='net', lookup_expr='gte', label="Start Date")
end_date = django_filters.DateFilter(field_name='net', lookup_expr='lte', label="End Date")
class Meta:
model = Launch
fields = ['search', 'launch_service_provider', 'status', 'location', 'start_date', 'end_date']
order_by = ['-net']
def filter_by_all_name_fields(self, queryset, name, value):
return queryset.filter(
Q(name__icontains=value) |
Q(mission__name__icontains=value) |
Q(rocket__configuration__full_name__icontains=value) |
Q(rocket__configuration__name__icontains=value) |
Q(launch_service_provider__name__icontains=value) |
Q(launch_service_provider__abbrev__icontains=value)
)
|
1649185
|
from asyncio import sleep
from typing import Callable, Coroutine, Any
Handler = Callable[..., Coroutine[Any, Any, Any]]
async def delayed_task(
seconds: int, handler: Handler, do_break: bool = False, *args, **kwargs
):
while True:
await sleep(seconds)
await handler(*args, **kwargs)
if do_break:
break
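# --- Usage sketch (illustrative) ---
# With do_break=True the handler fires once after the delay; otherwise it
# keeps firing every `seconds` seconds.
if __name__ == "__main__":
    import asyncio

    async def tick(msg: str):
        print(msg)

    asyncio.run(delayed_task(1, tick, True, "tick"))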
|
1649194
|
import sys
sys.path.append("../../")
from appJar import gui
with gui() as app:
with app.labelFrame("framer"):
app.label('hello world')
with app.labelFrame("framer"):
app.label('hhello world')
|
1649196
|
class UdpPacket:
def __init__(self, length: int, payload: bytearray):
self.length = length
self.payload = payload
|
1649222
|
import os
import re
import sys
import pwd
import time
import socket
import platform
import struct
import fcntl
try:
import urllib2
except ImportError:
import urllib.request as urllib2
import subprocess
try:
import lsb_release
except ImportError:
lsb_release = None
from datetime import datetime
from collections import defaultdict
import psutil
try:
from psutil import NoSuchProcess, AccessDenied
except ImportError:
# Compatible < 2.0.0
from psutil._error import NoSuchProcess, AccessDenied
from dash.utils import bytes2human, to_meg
from dash.conf import dnsmasq_lease_file, ping_hosts
is_mac = False
if sys.platform == 'darwin':
is_mac = True
check_list = ['php', 'node', 'mysql', 'vim', 'python', 'ruby', 'java',
'apache2', 'nginx', 'openssl', 'vsftpd', 'make']
def df():
'''disk_usage'''
df = []
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
percent = str(int(usage.percent)) + '%'
disk = [part.device, bytes2human(usage.total),
bytes2human(usage.used), bytes2human(usage.free),
percent, part.mountpoint]
df.append(disk)
return df
def hostname():
return socket.gethostname()
def get_ip_address(ifname):
SIOCGIFADDR = 0x8915
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
ifreq = struct.pack('16sH14s', ifname.encode('utf-8'), socket.AF_INET, b'\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except socket.error:
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
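# --- Usage sketch (illustrative; the interface name is hypothetical) ---
# SIOCGIFADDR asks the kernel for the IPv4 address bound to an interface:
#     get_ip_address('eth0')  ->  e.g. '192.168.1.10', or None if unbound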
def ip():
url = 'http://ipecho.net/plain'
external_ip = urllib2.urlopen(url).read()
ret = [["external ip", external_ip.decode('utf-8')]]
for network in psutil.net_io_counters(pernic=True).keys():
if is_mac:
import netifaces as ni
try:
ret.append([network, ni.ifaddresses(network)[2][0]['addr']])
except KeyError:
continue
else:
ret.append([network, get_ip_address(network)])
return ret
def issue():
uname = platform.uname()[2]
if lsb_release is not None:
distinfo = lsb_release.get_distro_information()
lsb = distinfo.get('DESCRIPTION', 'n/a')
elif is_mac:
        lsb = 'OS X ' + platform.mac_ver()[0]
else:
lsb = ' '.join(platform.dist())
return lsb + '\n' + uname
def mem():
phymem = psutil.virtual_memory()
total = phymem.total
#phymem.free + buffers + cached
free = phymem.available
used = total - free
return ['Mem', to_meg(total), to_meg(used), to_meg(free)]
def numberofcores():
    # psutil >= 2.0.0 replaced NUM_CPUS with cpu_count()
    return str(getattr(psutil, 'NUM_CPUS', None) or psutil.cpu_count())
def w():
    # TODO: user idle time is not implemented yet
user_idle_time = '0.00s'
ret = []
for u in psutil.users():
ret.append([u.name,
u.host,
datetime.fromtimestamp(u.started).strftime("%H:%M"),
user_idle_time
])
return ret
def ps():
ret = []
for p in psutil.pids():
try:
p_info = psutil.Process(p)
            # psutil < 2.0.0 exposed these as attributes; >= 2.0.0 as methods
            if psutil.__version__ < '2.0.0':
                isterminal = p_info.terminal
                create_time = p_info.create_time
                cmdline = p_info.cmdline
                username = p_info.username
                status = p_info.status
            else:
                isterminal = p_info.terminal()
                create_time = p_info.create_time()
                cmdline = p_info.cmdline()
                username = p_info.username()
                status = p_info.status()
if isterminal:
terminal = isterminal.replace('/dev/tty', '')
else:
terminal = '??'
# user + system (alias cputime)
cpu_time = (p_info.cpu_times().user +
p_info.cpu_times().system)
minute = int(cpu_time / 60)
cpu_time = str(minute) + ':' + '%.2f' % (cpu_time - minute * 60)
ret.append([username,
p,
p_info.cpu_percent(),
'%.1f' % p_info.memory_percent(),
p_info.memory_info().vms / 1024, # vsz
p_info.memory_info().rss / 1024, # rss
terminal,
                        str(status), # STAT
datetime.fromtimestamp(
create_time).strftime("%I:%M%p"),
cpu_time,
' '.join(cmdline)
])
except (NoSuchProcess, AccessDenied):
continue
return ret
def users():
ret = []
for u in pwd.getpwall():
if u.pw_uid <= 499:
type = 'system'
else:
type = 'user'
ret.append([type, u.pw_name, u.pw_dir])
return ret
def whereis():
ret = []
    # The same software may be installed in more than one location
d = defaultdict(list)
all_available_cmd = {}
has_installed = []
all_path = os.environ['PATH'].split(':')
for path in all_path:
try:
_, _, files = next(os.walk(path))
all_available_cmd[path] = files
        except StopIteration:  # this PATH entry may not exist
continue
for path, cmd_list in all_available_cmd.items():
for cmd in cmd_list:
if cmd in check_list:
has_installed.append(cmd)
d[cmd].append(os.path.join(path, cmd))
for i in d.items():
ret.append(list(i))
not_installed = list(set(check_list).difference(set(has_installed)))
for n in not_installed:
ret.append([n, 'Not Installed'])
return ret
def boot():
try:
boot_time = psutil.boot_time()
except AttributeError:
boot_time = psutil.get_boot_time()
# Compatible < 2.0.0
has_boot = time.time() - boot_time
hour = int(has_boot / 3600)
return str(hour) + ':' + str(int((has_boot - hour * 3600) / 60))
def loadavg():
load = os.getloadavg()
    cores = getattr(psutil, 'NUM_CPUS', None) or psutil.cpu_count()
return list(map(lambda x: ['%.2f' % x, '%.2f' % (x * 100 / cores)], load))
def bandwidth():
def ret_netstat():
netstat = defaultdict(int)
get_net_io_counters = psutil.net_io_counters(pernic=True)
for net in get_net_io_counters:
netstat['tx'] += get_net_io_counters[net].bytes_sent
netstat['rx'] += get_net_io_counters[net].bytes_recv
return netstat
old = ret_netstat()
time.sleep(2)
new = ret_netstat()
return dict(tx=(new['tx'] - old['tx'])/2, rx=(new['rx'] - old['rx'])/2)
def dnsmasq_leases():
if os.path.exists(dnsmasq_lease_file):
ret = []
with open(dnsmasq_lease_file) as f:
for line in f.readlines():
line = line.strip()
if line:
l = line.split()
                    t = datetime.fromtimestamp(
                        float(l[0])).strftime('%m/%d/%Y %H:%M:%S')
ret.append([t, l[1], l[2], l[3]])
return ret
else:
return []
def ping():
    # ICMP requires root privileges, so we cannot implement ping in pure
    # Python; shell out to the system ping binary instead.
avg_regex = re.compile(b'dev =.*?/(.*?)/.*?/.*ms')
p_cmd = None
for i in ['/bin/ping', '/sbin/ping']:
if os.path.exists(i):
p_cmd = i
if p_cmd is None:
return []
def ret_ping(host):
        ping = subprocess.Popen("{} -qc 2 {}".format(p_cmd, host),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
shell = True)
match = avg_regex.search(ping.stdout.read())
if match:
return match.group(1).decode('utf-8')
else:
return ''
ret = []
if os.path.exists(ping_hosts):
with open(ping_hosts) as f:
for host in f.readlines():
host = host.strip('\n\t#')
if host:
ret.append([host, ret_ping(host)])
else:
for host in ['gnu.org', 'github.com', 'wikipedia.org']:
ret.append([host, ret_ping(host)])
return ret
def date():
return time.strftime('%a %b %d %H:%M:%S %Z %Y')
|
1649227
|
import pytest
@pytest.fixture
def params_parser(dummy_request):
from snosearch.parsers import ParamsParser
from snovault.elasticsearch import ELASTIC_SEARCH
from elasticsearch import Elasticsearch
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&assay_title=Histone+ChIP-seq&award.project=Roadmap'
'&assembly=GRCh38&biosample_ontology.classification=primary+cell'
'&target.label=H3K27me3&biosample_ontology.classification%21=cell+line'
'&biosample_ontology.term_name%21=naive+thymus-derived+CD4-positive%2C+alpha-beta+T+cell'
'&limit=10&status=released&searchTerm=chip-seq&sort=date_created&sort=-files.file_size'
'&field=@id&field=accession&cart=abc123'
)
dummy_request.registry[ELASTIC_SEARCH] = Elasticsearch()
return ParamsParser(dummy_request)
def test_searches_queries_cart_search_query_factory_init(params_parser):
from encoded.cart_view import CartWithElements
from encoded.searches.queries import CartQueryMixin
from encoded.searches.queries import CartSearchQueryFactory
csqf = CartSearchQueryFactory(
params_parser,
cart=CartWithElements(params_parser._request)
)
assert isinstance(csqf, CartSearchQueryFactory)
assert isinstance(csqf, CartQueryMixin)
assert csqf.params_parser == params_parser
assert hasattr(csqf, '_get_post_filters_with_carts')
def test_searches_queries_cart_search_query_factory_with_facets_init(params_parser):
from encoded.cart_view import CartWithElements
from encoded.searches.queries import CartQueryMixin
from encoded.searches.queries import CartSearchQueryFactoryWithFacets
csqf = CartSearchQueryFactoryWithFacets(
params_parser,
cart=CartWithElements(params_parser._request)
)
assert isinstance(csqf, CartSearchQueryFactoryWithFacets)
assert isinstance(csqf, CartQueryMixin)
assert csqf.params_parser == params_parser
assert hasattr(csqf, '_get_post_filters_with_carts')
def test_searches_queries_cart_matrix_query_factory_with_facets_init(params_parser):
from encoded.cart_view import CartWithElements
from encoded.searches.queries import CartQueryMixin
from encoded.searches.queries import CartMatrixQueryFactoryWithFacets
cmqf = CartMatrixQueryFactoryWithFacets(
params_parser,
cart=CartWithElements(params_parser._request)
)
assert isinstance(cmqf, CartMatrixQueryFactoryWithFacets)
assert isinstance(cmqf, CartQueryMixin)
assert cmqf.params_parser == params_parser
assert hasattr(cmqf, '_get_post_filters_with_carts')
def test_searches_queries_cart_report_query_factory_with_facets_init(params_parser):
from encoded.cart_view import CartWithElements
from encoded.searches.queries import CartQueryMixin
from encoded.searches.queries import CartReportQueryFactoryWithFacets
    crqf = CartReportQueryFactoryWithFacets(
        params_parser,
        cart=CartWithElements(params_parser._request)
    )
    assert isinstance(crqf, CartReportQueryFactoryWithFacets)
    assert isinstance(crqf, CartQueryMixin)
    assert crqf.params_parser == params_parser
    assert hasattr(crqf, '_get_post_filters_with_carts')
|
1649279
|
from __future__ import division
# TODO: Add generator that considers lat/long (distance to North and South poles).
class Moisture:
def generate(self, map_obj):
# Calculate moisture. Freshwater sources spread moisture: rivers
# and lakes (not oceans). Saltwater sources have moisture but do
# not spread it (we set it at the end, after propagation).
corners_queue = []
for corner in map_obj.corners:
if corner.water and not corner.ocean:
corner.moisture = 1
corners_queue.append(corner)
elif corner.river > 0:
corner.moisture = min(3, 0.2 * corner.river)
corners_queue.append(corner)
while corners_queue:
corner = corners_queue.pop(0)
for neighbour in corner.adjacent:
new_moisture = corner.moisture * 0.9
if new_moisture > neighbour.moisture:
neighbour.moisture = new_moisture
corners_queue.append(neighbour)
# ocean moisture
for corner in map_obj.corners:
if corner.ocean or corner.coast:
corner.moisture = 1
self._redistribute_moisture(map_obj.land_corners)
# calculate moisture and biome for centers
for center in map_obj.centers:
center.moisture = sum([c.moisture for c in center.corners]) / len(center.corners)
center.biome = self.get_biome(center)
def _redistribute_moisture(self, corners):
"""
Change the overall distribution of moisture to be evenly distributed.
"""
corners.sort(key=lambda c: c.moisture)
for i, corner in enumerate(corners):
corner.moisture = i / (len(corners) - 1)
def get_biome(self, center):
"""
+-----------+-----------------------------------------------------------------------+
| Elevation | Moisture Zone |
| Zone +-----------+-----------+-----------+-----------+-----------+-----------+
| | 6 (wet) | 5 | 4 | 3 | 2 | 1 (dry) |
+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
| | | | | |
| 4 (high) | SNOW | TUNDRA | BARE | SCORCHED |
| | | | | |
+-----------+-----------------------+-----------+-----------+-----------+-----------+
| | | | |
| 3 | TAIGA | SHRUBLAND | TEMPERATE DESERT |
| | | | |
+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
| | TEMPERATE | TEMPERATE | | TEMPERATE |
| 2 | RAIN | DECIDUOUS | GRASSLAND | DESERT |
| | FOREST | FOREST | | |
+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
| | TROPICAL RAIN | TROPICAL SEASONAL | |SUBTROPICAL|
| 1 (low) | FOREST | FOREST | GRASSLAND | DESERT |
| | | | | |
+-----------+-----------------------+-----------------------+-----------+-----------+
"""
elevation = center.elevation
moisture = center.moisture
if center.ocean:
biome = 'OCEAN'
elif center.water:
if elevation < 0.1:
# FIXME: fix lake elevation at first, now it is set to 0
# biome = 'MARSH'
biome = 'LAKE'
elif elevation > 0.8:
biome = 'ICE'
else:
biome = 'LAKE'
elif center.coast:
biome = 'BEACH'
elif elevation > 0.8:
if moisture > 0.50:
biome = 'SNOW'
elif moisture > 0.33:
biome = 'TUNDRA'
elif moisture > 0.16:
biome = 'BARE'
else:
biome = 'SCORCHED'
elif elevation > 0.6:
if moisture > 0.66:
biome = 'TAIGA'
elif moisture > 0.33:
biome = 'SHRUBLAND'
else:
biome = 'TEMPERATE_DESERT'
elif elevation > 0.3:
if moisture > 0.83:
biome = 'TEMPERATE_RAIN_FOREST'
elif moisture > 0.50:
biome = 'TEMPERATE_DECIDUOUS_FOREST'
elif moisture > 0.16:
biome = 'GRASSLAND'
else:
biome = 'TEMPERATE_DESERT'
else:
if moisture > 0.66:
biome = 'TROPICAL_RAIN_FOREST'
elif moisture > 0.33:
biome = 'TROPICAL_SEASONAL_FOREST'
elif moisture > 0.16:
biome = 'GRASSLAND'
else:
biome = 'SUBTROPICAL_DESERT'
return biome
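# --- Usage sketch (illustrative) ---
# get_biome only reads elevation, moisture and the water flags, so a
# minimal stand-in center is enough to exercise the table above.
class _FakeCenter(object):
    ocean = water = coast = False
    def __init__(self, elevation, moisture):
        self.elevation = elevation
        self.moisture = moisture

# Moisture().get_biome(_FakeCenter(0.9, 0.6))  -> 'SNOW'
# Moisture().get_biome(_FakeCenter(0.2, 0.1))  -> 'SUBTROPICAL_DESERT'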
|
1649285
|
from .cbl_type import CBLType, CBLTypeInstance, CBLTypeMeta
from .containers import Temporary
from .function_type import InstanceFunctionType
import cmd_ir.instructions as i
class StructTypeInstance(CBLTypeInstance):
def __init__(self, compiler, this, var_members, func_members, func_properties):
super().__init__(func_members, func_properties)
self.__this = this
self.__var_members = []
for name, var_type in var_members.items():
self.__var_members.append(self.construct_var(compiler, name,
var_type))
def construct_var(self, compiler, name, type):
value = type.allocate(compiler, name)
return self.construct_member(name, type, value)
def as_variables(self):
if self.__this is None:
vars = []
for m in self.__var_members:
vars.extend(m.type.as_variables(m.value))
return vars
return (self.__this,)
def as_variable(self, typename):
assert self.__this is not None, "Cannot convert %s to variable" % typename
return self.__this
class StructTypeInstanceShadow(StructTypeInstance):
def __init__(self, shadow_instance, *args):
self.__shadow = shadow_instance
super().__init__(*args)
def construct_var(self, compiler, name, type):
value = self.__shadow.get_member(compiler, name).value
return self.construct_member(name, type, value)
class StructuredType(CBLType):
def __init__(self):
super().__init__()
self.__var_members = {}
self.__vars_allowed = True
self._is_nbt = False
@property
def meta_type_type(self):
return StructTypeMeta
def extend_from(self, parent):
super().extend_from(parent)
if isinstance(parent, StructuredType):
self.__var_members.update(parent.get_var_members())
@property
def ir_type(self):
if self._is_nbt:
return i.VarType.nbt
raise TypeError('%s does not have an IR type' % self)
def ir_types(self):
if self._is_nbt:
return (self.ir_type,)
types = []
for m_type in self.__var_members.values():
types.extend(m_type.ir_types())
return types
def as_variable(self, instance):
return instance.as_variable(self.typename)
def as_variables(self, instance):
return instance.as_variables()
def instance_member(self, name):
m = super().instance_member(name)
if m is None:
m = self.__var_members.get(name)
return m
def get_var_members(self):
return dict(self.__var_members)
def effective_var_size(self):
if self._is_nbt:
return 1
return sum(t.effective_var_size() for t in self.__var_members.values())
def add_variable_member(self, name, type):
self.__can_extend = False
if self.instance_member(name):
raise KeyError('%s is already defined in type %s' % (name,
self.name))
if not self.__vars_allowed:
raise RuntimeError('Cannot add more variables. Tried adding %s' % \
name)
self.__var_members[name] = type
def allocate(self, compiler, namehint):
assert not self.incomplete, "Incomplete type %s" % self.typename
if self._is_nbt:
this = compiler.create_var(namehint, i.VarType.nbt)
def create_sub_var(subname, var_type):
path = i.VirtualString('.' + subname)
insn = i.NBTSubPath(this, path, var_type)
return compiler.define(namehint + '_' + subname, insn)
else:
this = None
orig_create_var = compiler.create_var
def create_sub_var(subname, var_type):
return orig_create_var(namehint + '_' + subname, var_type)
with compiler.set_create_var(create_sub_var):
return StructTypeInstance(compiler, this, self.__var_members,
self.get_func_members(),
self.get_func_properties())
def add_function_member(self, compiler, name, ret_type, params, inline,
is_async):
self.__complete_vars()
return super().add_function_member(compiler, name, ret_type, params,
inline, is_async)
def add_operator_member(self, compiler, op, ret_type, params, inline):
self.__complete_vars()
return super().add_operator_member(compiler, op, ret_type, params,
inline)
def add_constructor(self, compiler, params, inline):
self.__complete_vars()
return super().add_constructor(compiler, params, inline)
def __complete_vars(self):
if not self.__vars_allowed:
return
self.__vars_allowed = False
self.__can_extend = False
# Initially we are not NBT wrapped
size = self.effective_var_size()
# Become NBT wrapped if size exceeds 3 variables
if size > 3:
self._is_nbt = True
def _copy_impl(self, compiler, this, other):
thisobj = this.value
if self._is_nbt:
compiler.add_insn(i.SetScore(thisobj.as_variable(self.typename),
other.value.as_variable(other.type.typename)))
else:
# Pair each var member
for var in self.__var_members.keys():
lvar = thisobj.get_member(compiler, var)
rvar = other.value.get_member(compiler, var)
lvar.type.dispatch_operator(compiler, '=', lvar, rvar)
return other
def _default_ctor(self, compiler, container, args):
ret = super()._default_ctor(compiler, container, args)
self.__construct_members(compiler, container.this, {})
return ret
def complete_type(self, compiler):
self.__complete_vars()
super().complete_type(compiler)
def do_construction(self, compiler, thisobj, member_inits):
if self.parent_type:
pname = self.parent_type.typename
pargs = ()
# Steal parent arguments if exists from member_inits
if pname in member_inits:
pargs = member_inits[pname]
del member_inits[pname]
self._construct_parent(compiler, thisobj, pargs)
self.__construct_members(compiler, thisobj, member_inits)
def __construct_members(self, compiler, thisobj, member_inits):
own_members = self.__var_members
if isinstance(self.parent_type, StructuredType):
p_members = self.parent_type.get_var_members().keys()
own_members = { name: m for name, m in self.__var_members.items() \
if name not in p_members }
for name in member_inits.keys():
assert name in own_members, (self, name)
for varname in own_members.keys():
member = thisobj.get_member(compiler, varname)
args = member_inits.get(varname, ())
member.type.run_constructor(compiler, member, args)
def coerce_to(self, compiler, container, type):
super_did_coerce = super().coerce_to(compiler, container, type)
if super_did_coerce:
return super_did_coerce
if self.parent_type is not None:
if type == self.parent_type and isinstance(type, StructuredType):
# Can re-use the nbt wrapper. Since extend is append-only
# we know self._is_nbt == True
if type._is_nbt:
return container
# Create a shadow copy using the subset of our members
# found in the parent type
val = StructTypeInstanceShadow(container.value, compiler, None,
type.get_var_members(),
type.get_func_members(),
type.get_func_properties())
return Temporary(type, val)
# Walk the hierarchy to see if we can coerce from a parent type
return self.parent_type.coerce_to(compiler, container, type)
return None
class StructTypeMeta(CBLTypeMeta, StructuredType):
def __init__(self, the_type):
StructuredType.__init__(self)
CBLTypeMeta.__init__(self, the_type)
def create_meta(self, compiler, namehint):
super().create_meta(compiler, namehint)
for name, type in self.get_var_members().items():
sym = compiler.scope.declare_symbol(name, type)
self._meta_instance[name] = sym
|
1649297
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import (
cross_val_score,
train_test_split,
ShuffleSplit,
)
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score, recall_score
from sklearn.metrics import confusion_matrix
import pickle
from utils import *
# Load the data
data = pd.read_csv("./tfidf.csv", encoding="utf-8")
# plag_data = 216
# non_plag_data = 216
# Standardization
def standardization(df):
return (df - df.values.mean()) / df.values.std()
# Normalization
def normalization(df):
return (df - df.values.min()) / (df.values.max() - df.values.min())
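# Example (illustrative): for values [[0.0, 5.0], [10.0, 2.5]],
# standardization rescales to zero mean / unit std over all cells, while
# normalization maps the global min to 0 and the global max to 1.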
# Split into training and test sets
f1_sum = acc_sum = rec_sum = 0
tp_sum = fp_sum = tn_sum = fn_sum = 0
rep = 10  # number of repetitions
for i in range(rep):
print("\n--", i, "-" * 20)
x_train, x_test, y_train, y_test, plag_num, nonplag_num = split(data)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
    # Train the model
clf = RandomForestClassifier()
scores = cross_val_score(clf, x_train, y_train, cv=5)
print(scores)
print("cross-val-score : ", scores.mean())
clf.fit(x_train, y_train)
    # Evaluate
y_pred = clf.predict(x_test)
    y_prob = clf.predict_proba(x_test)  # class probabilities
acc = accuracy_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
print("正解率 = ", acc)
print("リコール = ", rec)
print("F値 = ", f1)
filename = "model.sav"
pickle.dump(clf, open(filename, "wb"))
tn, fp, fn, tp = confusion_matrix(y_test, y_pred, labels=[0, 1]).flatten()
print("TP : {}, FP : {}".format(tp, fp))
print("FN : {}, TN : {}".format(fn, tn))
title = "Learning Curves"
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
plot_learning_curve(clf, title, x_train, y_train, cv=cv)
acc_sum += acc
rec_sum += rec
f1_sum += f1
tp_sum += tp
fp_sum += fp
tn_sum += tn
fn_sum += fn
acc_sum /= rep
rec_sum /= rep
f1_sum /= rep
tp_sum /= rep
fp_sum /= rep
tn_sum /= rep
fn_sum /= rep
print("accuracy :", acc_sum)
print("recall :", rec_sum)
print("F1-measure :", f1_sum)
print("TP : {}%, FP : {}%".format(tp_sum/x_test.shape[0], fp_sum/x_test.shape[0]))
print("FN : {}%, TN : {}%".format(fn_sum/x_test.shape[0], tn_sum/x_test.shape[0]))
|
1649309
|
from sublime_plugin import WindowCommand
from ..libraries import serial
from ..libraries.tools import get_setting
class DeviotCleanConsoleCommand(WindowCommand):
monitor = None
def run(self):
self.monitor.clean_console()
def is_enabled(self):
port_id = get_setting('port_id', None)
if(port_id and port_id in serial.serials_in_use):
self.monitor = serial.serial_monitor_dict[port_id]
if(self.monitor.is_running):
return True
return False
|
1649324
|
from asgi_webdav.constants import (
DAVPath,
)
def test_basic():
path = DAVPath("/a/b/c")
assert path.raw == "/a/b/c"
assert path.parts == ["a", "b", "c"]
assert path.count == 3
assert path.parent == DAVPath("/a/b")
assert path.name == "c"
assert path.startswith(DAVPath("/a/b"))
assert path.get_child(DAVPath("/a/b")) == DAVPath("/c")
assert path.add_child("d") == DAVPath("/a/b/c/d")
assert path.add_child(DAVPath("/d/e")) == DAVPath("/a/b/c/d/e")
def test_some_error():
path = DAVPath("/a/b/c")
print(path.add_child("/d/e"))
assert path.add_child("/d/e") != DAVPath("/a/b/c/de")
|
1649340
|
import asyncio
import logging
from time import sleep
from sanic import Sanic
from sanic.exceptions import ServiceUnavailable
from sanic.log import LOGGING_CONFIG_DEFAULTS
from sanic.response import text
response_timeout_app = Sanic("test_response_timeout")
response_timeout_default_app = Sanic("test_response_timeout_default")
response_handler_cancelled_app = Sanic("test_response_handler_cancelled")
response_timeout_app.config.RESPONSE_TIMEOUT = 1
response_timeout_default_app.config.RESPONSE_TIMEOUT = 1
response_handler_cancelled_app.config.RESPONSE_TIMEOUT = 1
response_handler_cancelled_app.ctx.flag = False
@response_timeout_app.route("/1")
async def handler_1(request):
await asyncio.sleep(2)
return text("OK")
@response_timeout_app.exception(ServiceUnavailable)
def handler_exception(request, exception):
return text("Response Timeout from error_handler.", 503)
@response_timeout_default_app.route("/1")
async def handler_2(request):
await asyncio.sleep(2)
return text("OK")
@response_handler_cancelled_app.exception(asyncio.CancelledError)
def handler_cancelled(request, exception):
# If we get a CancelledError, it means sanic has already sent a response,
# we should not ever have to handle a CancelledError.
response_handler_cancelled_app.ctx.flag = True
return text("App received CancelledError!", 500)
# The client will never receive this response, because the socket
# is already closed when we get a CancelledError.
@response_handler_cancelled_app.route("/1")
async def handler_3(request):
await asyncio.sleep(2)
return text("OK")
def test_server_error_response_timeout():
request, response = response_timeout_app.test_client.get("/1")
assert response.status == 503
assert response.text == "Response Timeout from error_handler."
def test_default_server_error_response_timeout():
request, response = response_timeout_default_app.test_client.get("/1")
assert response.status == 503
assert "Response Timeout" in response.text
def test_response_handler_cancelled():
request, response = response_handler_cancelled_app.test_client.get("/1")
assert response.status == 503
assert "Response Timeout" in response.text
assert response_handler_cancelled_app.ctx.flag is False
def test_response_timeout_not_applied(caplog):
modified_config = LOGGING_CONFIG_DEFAULTS
modified_config["loggers"]["sanic.root"]["level"] = "DEBUG"
app = Sanic("test_logging", log_config=modified_config)
app.config.RESPONSE_TIMEOUT = 1
app.ctx.event = asyncio.Event()
@app.websocket("/ws")
async def ws_handler(request, ws):
sleep(2)
await asyncio.sleep(0)
request.app.ctx.event.set()
with caplog.at_level(logging.DEBUG):
_ = app.test_client.websocket("/ws")
assert app.ctx.event.is_set()
assert (
"sanic.root",
10,
"Handling websocket. Timeouts disabled.",
) in caplog.record_tuples
|
1649361
|
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.fmp4_api import Fmp4Api
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.information.information_api import InformationApi
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.drm.drm_api import DrmApi
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.fmp4_muxing_list_query_params import Fmp4MuxingListQueryParams
|
1649363
|
from __future__ import unicode_literals
SEQUENCE = [
'old_tool_model',
'tool_working_directory_required',
]
|
1649370
|
from IPython.display import HTML
def print_frames(dataframes):
if not isinstance(dataframes, tuple):
return dataframes
border_style = u'\"border: none\"'
cells = [u'<td style={}> {} </td>'.format(border_style, df._repr_html_()) for df in dataframes]
table = '''<table style={}>
<tr style={}>'''.format(border_style, border_style) +\
'\n'.join(cells)+\
'''
</tr>
</table>'''
return HTML(table)
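# --- Usage sketch (illustrative, in a Jupyter notebook cell) ---
#     import pandas as pd
#     a = pd.DataFrame({'x': [1, 2]})
#     b = pd.DataFrame({'y': [3, 4]})
#     print_frames((a, b))  # renders the two frames side by side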
|
1649431
|
from .ner import *
from .multi_choice import *
from .sequence_classification import *
from .record_qa import *
from .masked_language_model import *
|
1649435
|
import doctest
import insights.parsers.octavia as octavia_module
from insights.parsers.octavia import OctaviaConf, VALID_KEYS
from insights.tests import context_wrap
CONF_FILE = """
[DEFAULT]
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = False
# Plugin options are hot_plug_plugin (Hot-pluggable controller plugin)
octavia_plugins = hot_plug_plugin
# Hostname to be used by the host machine for services running on it.
# The default value is the hostname of the host machine.
host = some_hostname.some_domain.com
# AMQP Transport URL
# For Single Host, specify one full transport URL:
# transport_url = rabbit://<user>:<pass>@127.0.0.1:5672/<vhost>
# For HA, specify queue nodes in cluster, comma delimited:
# transport_url = rabbit://<user>:<pass>@server01,<user>:<pass>@server02/<vhost>
transport_url =
# How long in seconds to wait for octavia worker to exit before killing them.
graceful_shutdown_timeout = 60
log_file = some_file
log_dir = some_dir
policy_file = some_policy_file
[api_settings]
bind_host = 127.0.0.1
bind_port = 9876
# How should authentication be handled (keystone, noauth)
auth_strategy = keystone
allow_pagination = True
allow_sorting = True
pagination_max_limit = 1000
# Base URI for the API for use in pagination links.
# This will be autodetected from the request if not overridden here.
# Example:
# api_base_uri = http://localhost:9876
api_base_uri = http://localhost:9876
# Enable/disable ability for users to create TLS Terminated listeners
allow_tls_terminated_listeners = True
# Enable/disable ability for users to create PING type Health Monitors
allow_ping_health_monitors = True
# Dictionary of enabled provider driver names and descriptions
# A comma separated list of dictionaries of the enabled provider driver names
# and descriptions.
enabled_provider_drivers = amphora:The Octavia Amphora driver.,octavia: \\
Deprecated alias of the Octavia Amphora driver.
# Default provider driver
default_provider_driver = amphora
# The minimum health monitor delay interval for UDP-CONNECT Health Monitor type
udp_connect_min_interval_health_monitor = 3
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql+pymysql://root:pass@127.0.0.1:3306/octavia
# Replace 127.0.0.1 above with the IP address of the database used by the
# main octavia server. (Leave it as is if the database runs on this host.)
connection = mysql+pymysql://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
[health_manager]
bind_ip = 127.0.0.1
bind_port = 5555
# controller_ip_port_list example: 127.0.0.1:5555, 127.0.0.1:5555
controller_ip_port_list = 127.0.0.1:5555, 127.0.0.1:5555
failover_threads = 10
# status_update_threads will default to the number of processors on the host.
# This setting is deprecated and if you specify health_update_threads and
# stats_update_threads, they override this parameter.
status_update_threads = 10
# health_update_threads will default to the number of processors on the host
health_update_threads = 10
# stats_update_threads will default to the number of processors on the host
stats_update_threads = 10
heartbeat_interval = 10
# Symmetric encryption key
heartbeat_key =
heartbeat_timeout = 60
health_check_interval = 3
sock_rlimit = 0
# Health/StatsUpdate options are
# *_db
# *_logger
health_update_driver = health_db
stats_update_driver = stats_db
[keystone_authtoken]
# This group of config options are imported from keystone middleware. Thus the
# option names should match the names declared in the middleware.
# The www_authenticate_uri is the public endpoint and is returned in headers on a 401
# www_authenticate_uri = https://localhost:5000/v3
# The auth_url is the admin endpoint actually used for validating tokens
auth_url = https://localhost:5000/v3
username = octavia
password = password
project_name = service
# Domain names must be set, these are *not* default but work for most clouds
# project_domain_name = Default
user_domain_name = Default
insecure = False
cafile =
[certificates]
# Certificate Generator options are local_cert_generator
cert_generator = local_cert_generator
# For local certificate signing:
ca_certificate = /etc/ssl/certs/ssl-cert-snakeoil.pem
ca_private_key = /etc/ssl/private/ssl-cert-snakeoil.key
ca_private_key_passphrase =
server_certs_key_passphrase = <PASSWORD>
signing_digest = sha256
cert_validity_time = 2592000 # 30 days = 30d * 24h * 60m * 60s = 2592000s
storage_path = /var/lib/octavia/certificates/
# For the TLS management
# Certificate Manager options are local_cert_manager
# barbican_cert_manager
# castellan_cert_manager
cert_manager = barbican_cert_manager
# For Barbican authentication (if using any Barbican based cert class)
barbican_auth = barbican_acl_auth
#
# Region in Identity service catalog to use for communication with the Barbican service.
region_name = some_region
#
# Endpoint type to use for communication with the Barbican service.
endpoint_type = publicURL
[networking]
# The maximum attempts to retry an action with the networking service.
max_retries = 15
# Seconds to wait before retrying an action with the networking service.
retry_interval = 1
# The maximum time to wait, in seconds, for a port to detach from an amphora
port_detach_timeout = 300
# Allow/disallow specific network object types when creating VIPs.
allow_vip_network_id = True
allow_vip_subnet_id = True
allow_vip_port_id = True
# List of network_ids that are valid for VIP creation.
# If this field empty, no validation is performed.
valid_vip_networks =
# List of reserved IP addresses that cannot be used for member addresses
# The default is the nova metadata service address
reserved_ips = ['169.254.169.254']
[haproxy_amphora]
base_path = /var/lib/octavia
base_cert_dir = /var/lib/octavia/certs
# Absolute path to a custom HAProxy template file
haproxy_template = /some/path
connection_logging = True
connection_max_retries = 120
connection_retry_interval = 5
build_rate_limit = -1
build_active_retries = 120
build_retry_interval = 5
# Maximum number of entries that can fit in the stick table.
# The size supports "k", "m", "g" suffixes.
haproxy_stick_size = 10k
# REST Driver specific
bind_host = 0.0.0.0
bind_port = 9443
#
# This setting is only needed with IPv6 link-local addresses (fe80::/64) are
# used for communication between Octavia and its Amphora, if IPv4 or other IPv6
# addresses are used it can be ignored.
lb_network_interface = o-hm0
#
haproxy_cmd = /usr/sbin/haproxy
respawn_count = 2
respawn_interval = 2
client_cert = /etc/octavia/certs/client.pem
server_ca = /etc/octavia/certs/server_ca.pem
#
# This setting is deprecated. It is now automatically discovered.
use_upstart = True
#
rest_request_conn_timeout = 10
rest_request_read_timeout = 60
#
# These "active" timeouts are used once the amphora should already
# be fully up and active. These values are lower than the other values to
# facilitate "fail fast" scenarios like failovers
active_connection_max_retries = 15
active_connection_rety_interval = 2
# The user flow log format for HAProxy.
# {{ project_id }} and {{ lb_id }} will be automatically substituted by the
# controller when configuring HAProxy if they are present in the string.
user_log_format = '{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST %B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt %tsc'
[controller_worker]
workers = 1
amp_active_retries = 30
amp_active_wait_sec = 10
# Glance parameters to extract image ID to use for amphora. Only one of
# parameters is needed. Using tags is the recommended way to refer to images.
amp_image_id =
amp_image_tag =
# Optional owner ID used to restrict glance images to one owner ID.
# This is a recommended security setting.
amp_image_owner_id =
# Nova parameters to use when booting amphora
amp_flavor_id =
# Upload the ssh key as the service_auth user described elsewhere in this config.
# Leaving this variable blank will install no ssh key on the amphora.
amp_ssh_key_name =
amp_ssh_access_allowed = True
# Networks to attach to the Amphorae examples:
# - One primary network
# - - amp_boot_network_list = 22222222-3333-4444-5555-666666666666
# - Multiple networks
# - - amp_boot_network_list = 11111111-2222-33333-4444-555555555555, 22222222-3333-4444-5555-666666666666
# - All networks defined in the list will be attached to each amphora
amp_boot_network_list =
amp_secgroup_list =
client_ca = /etc/octavia/certs/ca_01.pem
# Amphora driver options are amphora_noop_driver,
# amphora_haproxy_rest_driver
#
amphora_driver = amphora_noop_driver
#
# Compute driver options are compute_noop_driver
# compute_nova_driver
#
compute_driver = compute_noop_driver
#
# Network driver options are network_noop_driver
# allowed_address_pairs_driver
#
network_driver = network_noop_driver
# Volume driver options are volume_noop_driver
# volume_cinder_driver
#
volume_driver = volume_noop_driver
#
# Distributor driver options are distributor_noop_driver
# single_VIP_amphora
#
distributor_driver = distributor_noop_driver
#
# Load balancer topology options are SINGLE, ACTIVE_STANDBY
loadbalancer_topology = SINGLE
user_data_config_drive = False
[task_flow]
# TaskFlow engine options are:
# - serial: Runs all tasks on a single thread.
# - parallel: Schedules tasks onto different threads to allow
# for running non-dependent tasks simultaneously
#
engine = parallel
max_workers = 5
#
# This setting prevents the controller worker from reverting taskflow flows.
# This will leave resources in an inconsistent state and should only be used
# for debugging purposes.
disable_revert = False
[oslo_messaging]
# Queue Consumer Thread Pool Size
rpc_thread_pool_size = 2
# Topic (i.e. Queue) Name
topic = octavia_prov
[oslo_middleware]
# HTTPProxyToWSGI middleware enabled
enable_proxy_headers_parsing = False
[house_keeping]
# Interval in seconds to initiate spare amphora checks
spare_check_interval = 30
spare_amphora_pool_size = 0
# Cleanup interval for Deleted amphora
cleanup_interval = 30
# Amphora expiry age in seconds. Default is 1 week
amphora_expiry_age = 604800
# Load balancer expiry age in seconds. Default is 1 week
load_balancer_expiry_age = 604800
[amphora_agent]
agent_server_ca = /etc/octavia/certs/client_ca.pem
agent_server_cert = /etc/octavia/certs/server.pem
# Defaults for agent_server_network_dir when not specified here are:
# Ubuntu: /etc/netns/amphora-haproxy/network/interfaces.d/
# Centos/fedora/rhel: /etc/netns/amphora-haproxy/sysconfig/network-scripts/
#
agent_server_network_dir =
agent_server_network_file =
agent_request_read_timeout = 180
# Minimum TLS protocol, eg: TLS, TLSv1.1, TLSv1.2, TLSv1.3 (if available)
agent_tls_protocol = TLSv1.2
# Amphora default UDP driver is keepalived_lvs
#
amphora_udp_driver = keepalived_lvs
##### Log offloading
#
# Note: The admin and tenant logs can point to the same endpoints.
#
# List of log server ip and port pairs for Administrative logs.
# Additional hosts are backup to the primary server. If none are
# specified, remote logging is disabled.
# Example: 192.0.2.1:10514, 2001:db8:1::10:10514
#
admin_log_targets =
#
# List of log server ip and port pairs for tenant traffic logs.
# Additional hosts are backup to the primary server. If none are
# specified, remote logging is disabled.
# Example: 192.0.2.1:10514, 2001:db8:2::15:10514
#
tenant_log_targets =
# Sets the syslog LOG_LOCAL[0-7] facility number for amphora log offloading.
# user_log_facility will receive the traffic flow logs.
# administrative_log_facility will receive the amphora processes logs.
# Note: Some processes only support LOG_LOCAL, so we are restricted to the
# LOG_LOCAL facilities.
#
user_log_facility = 0
administrative_log_facility = 1
# The log forwarding protocol to use. One of TCP or UDP.
log_protocol = UDP
# The maximum attempts to retry connecting to the logging host.
log_retry_count = 5
# The time, in seconds, to wait between retries connecting to the logging host.
log_retry_interval = 2
# The queue size (messages) to buffer log messages.
log_queue_size = 10000
# Controller local path to a custom logging configuration template.
# Currently this is an rsyslog configuration file template.
logging_template_override =
# When True, the amphora will forward all of the system logs (except tenant
# traffic logs) to the admin log target(s). When False, only amphora specific
# admin logs will be forwarded.
forward_all_logs = False
# When True, no logs will be written to the amphora filesystem. When False,
# log files will be written to the local filesystem.
disable_local_log_storage = False
[keepalived_vrrp]
# Amphora Role/Priority advertisement interval in seconds
vrrp_advert_int = 1
# Service health check interval and success/fail count
vrrp_check_interval = 5
vrrp_fail_count = 2
vrrp_success_count = 2
# Amphora MASTER gratuitous ARP refresh settings
vrrp_garp_refresh_interval = 5
vrrp_garp_refresh_count = 2
[service_auth]
memcached_servers =
cafile = /opt/stack/data/ca-bundle.pem
project_domain_name = Default
project_name = admin
user_domain_name = Default
password = password
username = admin
auth_type = password
auth_url = http://localhost:5555/
[nova]
# The name of the nova service in the keystone catalog
service_name =
# Custom nova endpoint if override is necessary
endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
endpoint_type = publicURL
# CA certificates file to verify neutron connections when TLS is enabled
ca_certificates_file =
# Disable certificate validation on SSL connections
insecure = False
# If non-zero, generate a random name of the length provided for each amphora,
# in the format "a[A-Z0-9]*".
# Otherwise, the default name format will be used: "amphora-{UUID}".
random_amphora_name_length = 0
#
# Availability zone to use for creating Amphorae
availability_zone =
# Enable anti-affinity in nova
enable_anti_affinity = False
# Set the anti-affinity policy to what is suitable.
# Nova supports: anti-affinity and soft-anti-affinity
anti_affinity_policy = anti-affinity
[cinder]
# The name of the cinder service in the keystone catalog
service_name =
# Custom cinder endpoint if override is necessary
endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
endpoint_type = publicURL
# Availability zone to use for creating Volume
availability_zone =
# CA certificates file to verify cinder connections when TLS is enabled
insecure = False
ca_certificates_file =
# Size of root volume in GB for Amphora Instance when use Cinder
# In some storage backends such as ScaleIO, the size of volume is multiple of 8
volume_size = 16
# Volume type to be used for Amphora Instance root disk
# If not specified, default_volume_type from cinder.conf will be used
volume_type =
# Interval time to wait until volume becomes available
volume_create_retry_interval = 5
# Timeout to wait for volume creation success
volume_create_timeout = 300
# Maximum number of retries to create volume
volume_create_max_retries = 5
[glance]
# The name of the glance service in the keystone catalog
service_name =
# Custom glance endpoint if override is necessary
endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
endpoint_type = publicURL
# CA certificates file to verify neutron connections when TLS is enabled
insecure = False
ca_certificates_file =
[neutron]
# The name of the neutron service in the keystone catalog
service_name =
# Custom neutron endpoint if override is necessary
endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
endpoint_type = publicURL
# CA certificates file to verify neutron connections when TLS is enabled
insecure = False
ca_certificates_file =
[quotas]
default_load_balancer_quota = -1
default_listener_quota = -1
default_member_quota = -1
default_pool_quota = -1
default_health_monitor_quota = -1
[audit]
# Enable auditing of API requests.
enabled = False
# Path to audit map file for octavia-api service. Used only
# when API audit is enabled.
audit_map_file = /etc/octavia/octavia_api_audit_map.conf
# Comma separated list of REST API HTTP methods to be
# ignored during audit. For example: auditing will not be done
# on any GET or POST requests if this is set to "GET,POST". It
# is used only when API audit is enabled.
ignore_req_list =
[audit_middleware_notifications]
# Note: This section comes from openstack/keystonemiddleware
# It is included here for documentation convenience and may be out of date
# Indicate whether to use oslo_messaging as the notifier. If set to False,
# the local logger will be used as the notifier. If set to True, the
# oslo_messaging package must also be present. Otherwise, the local logger
# will be used instead.
use_oslo_messaging = True
# The Driver to handle sending notifications. Possible values are messaging,
# messagingv2, routing, log, test, noop. If not specified, then value from
# oslo_messaging_notifications conf section is used.
driver =
# List of AMQP topics used for OpenStack notifications. If not specified,
# then value from oslo_messaging_notifications conf section is used.
topics =
# A URL representing messaging driver to use for notification. If not
# specified, we fall back to the same configuration used for RPC.
transport_url =
[driver_agent]
status_socket_path = /var/run/octavia/status.sock
stats_socket_path = /var/run/octavia/stats.sock
get_socket_path = /var/run/octavia/get.sock
# Maximum time to wait for a status message before checking for shutdown
status_request_timeout = 5
# Maximum number of status processes per driver-agent
status_max_processes = 50
# Maximum time to wait for a stats message before checking for shutdown
stats_request_timeout = 5
# Maximum number of stats processes per driver-agent
stats_max_processes = 50
# Percentage of max_processes (both status and stats) in use to start
# logging warning messages about an overloaded driver-agent.
max_process_warning_percent = .75
# How long in seconds to wait for provider agents to exit before killing them.
provider_agent_shutdown_timeout = 60
# List of enabled provider agents.
enabled_provider_agents =
"""
DEFAULT_OPTIONS = set([
'debug', 'octavia_plugins', 'graceful_shutdown_timeout',
'log_file', 'log_dir', 'policy_file'
])
def test_full_conf():
# Simulate filtering to allow testing filtered data
filtered_content = []
for line in CONF_FILE.strip().splitlines():
if any([f in line for f in VALID_KEYS]):
filtered_content.append(line)
octavia_conf = OctaviaConf(context_wrap('\n'.join(filtered_content)))
assert octavia_conf is not None
assert set(octavia_conf.defaults().keys()) == DEFAULT_OPTIONS
assert octavia_conf.defaults()['debug'] == 'False'
assert octavia_conf.defaults()['octavia_plugins'] == 'hot_plug_plugin'
assert 'api_settings' in octavia_conf
assert set(octavia_conf.items('api_settings').keys()) == set([
'bind_host', 'bind_port', 'auth_strategy', 'allow_pagination', 'allow_sorting',
'pagination_max_limit', 'api_base_uri', 'allow_tls_terminated_listeners',
'allow_ping_health_monitors', 'enabled_provider_drivers', 'default_provider_driver',
'udp_connect_min_interval_health_monitor'
]) | DEFAULT_OPTIONS
assert 'database' in octavia_conf
assert set(octavia_conf.items('database').keys()) == DEFAULT_OPTIONS
assert 'health_manager' in octavia_conf
assert set(octavia_conf.items('health_manager').keys()) == set([
'bind_ip', 'bind_port', 'controller_ip_port_list', 'failover_threads',
'status_update_threads', 'health_update_threads', 'stats_update_threads',
'heartbeat_interval', 'heartbeat_timeout', 'health_check_interval',
'sock_rlimit', 'health_update_driver', 'stats_update_driver'
]) | DEFAULT_OPTIONS
assert 'keystone_authtoken' in octavia_conf
assert set(octavia_conf.items('keystone_authtoken').keys()) == set(['insecure', 'cafile']) | DEFAULT_OPTIONS
assert 'certificates' in octavia_conf
assert set(octavia_conf.items('certificates').keys()) == set([
'cert_generator', 'signing_digest', 'cert_validity_time', 'storage_path',
'cert_manager', 'region_name', 'endpoint_type'
]) | DEFAULT_OPTIONS
assert 'networking' in octavia_conf
assert set(octavia_conf.items('networking').keys()) == set([
'max_retries', 'retry_interval', 'port_detach_timeout', 'allow_vip_network_id',
'allow_vip_subnet_id', 'allow_vip_port_id', 'reserved_ips'
]) | DEFAULT_OPTIONS
assert 'haproxy_amphora' in octavia_conf
assert set(octavia_conf.items('haproxy_amphora').keys()) == set([
'base_path',
'base_cert_dir',
'haproxy_template',
'connection_logging',
'connection_max_retries',
'connection_retry_interval',
'build_rate_limit',
'build_active_retries',
'build_retry_interval',
'haproxy_stick_size',
'bind_host',
'bind_port',
'lb_network_interface',
'haproxy_cmd',
'respawn_count',
'respawn_interval',
'client_cert',
'server_ca',
'use_upstart',
'rest_request_conn_timeout',
'rest_request_read_timeout',
'active_connection_max_retries',
'active_connection_rety_interval',
'user_log_format',
]) | DEFAULT_OPTIONS
assert 'controller_worker' in octavia_conf
assert set(octavia_conf.items('controller_worker').keys()) == set([
'workers',
'amp_active_retries',
'amp_active_wait_sec',
'amp_image_id',
'amp_image_tag',
'amp_image_owner_id',
'amp_flavor_id',
'amp_boot_network_list',
'amp_secgroup_list',
'amp_ssh_access_allowed',
'client_ca',
'amphora_driver',
'compute_driver',
'network_driver',
'volume_driver',
'distributor_driver',
'loadbalancer_topology',
'user_data_config_drive',
]) | DEFAULT_OPTIONS
assert 'task_flow' in octavia_conf
assert set(octavia_conf.items('task_flow').keys()) == set([
'engine',
'max_workers',
'disable_revert',
]) | DEFAULT_OPTIONS
assert 'oslo_messaging' in octavia_conf
assert set(octavia_conf.items('oslo_messaging').keys()) == set([
'rpc_thread_pool_size',
'topic',
]) | DEFAULT_OPTIONS
assert 'oslo_middleware' in octavia_conf
assert set(octavia_conf.items('oslo_middleware').keys()) == set([
'enable_proxy_headers_parsing',
]) | DEFAULT_OPTIONS
assert 'house_keeping' in octavia_conf
assert set(octavia_conf.items('house_keeping').keys()) == set([
'spare_check_interval',
'spare_amphora_pool_size',
'cleanup_interval',
'amphora_expiry_age',
'load_balancer_expiry_age',
]) | DEFAULT_OPTIONS
assert 'amphora_agent' in octavia_conf
assert set(octavia_conf.items('amphora_agent').keys()) == set([
'agent_server_ca',
'agent_server_cert',
'agent_server_network_dir',
'agent_server_network_file',
'agent_request_read_timeout',
'agent_tls_protocol',
'amphora_udp_driver',
'admin_log_targets',
'tenant_log_targets',
'user_log_facility',
'administrative_log_facility',
'log_protocol',
'log_retry_count',
'log_retry_interval',
'log_queue_size',
'logging_template_override',
'forward_all_logs',
'disable_local_log_storage',
]) | DEFAULT_OPTIONS
assert 'keepalived_vrrp' in octavia_conf
assert set(octavia_conf.items('keepalived_vrrp').keys()) == set([
'vrrp_advert_int',
'vrrp_check_interval',
'vrrp_fail_count',
'vrrp_success_count',
'vrrp_garp_refresh_interval',
'vrrp_garp_refresh_count',
]) | DEFAULT_OPTIONS
assert 'service_auth' in octavia_conf
assert set(octavia_conf.items('service_auth').keys()) == set([
'memcached_servers',
'cafile',
'auth_type',
]) | DEFAULT_OPTIONS
assert 'nova' in octavia_conf
assert set(octavia_conf.items('nova').keys()) == set([
'service_name',
'region_name',
'endpoint_type',
'ca_certificates_file',
'insecure',
'random_amphora_name_length',
'availability_zone',
'enable_anti_affinity',
'anti_affinity_policy',
]) | DEFAULT_OPTIONS
assert 'cinder' in octavia_conf
assert set(octavia_conf.items('cinder').keys()) == set([
'service_name',
'region_name',
'endpoint_type',
'availability_zone',
'insecure',
'ca_certificates_file',
'volume_size',
'volume_type',
'volume_create_retry_interval',
'volume_create_timeout',
'volume_create_max_retries',
]) | DEFAULT_OPTIONS
assert 'glance' in octavia_conf
assert set(octavia_conf.items('glance').keys()) == set([
'service_name',
'region_name',
'endpoint_type',
'insecure',
'ca_certificates_file',
]) | DEFAULT_OPTIONS
assert 'neutron' in octavia_conf
assert set(octavia_conf.items('neutron').keys()) == set([
'service_name',
'region_name',
'endpoint_type',
'insecure',
'ca_certificates_file',
]) | DEFAULT_OPTIONS
assert 'quotas' in octavia_conf
assert set(octavia_conf.items('quotas').keys()) == set([
'default_load_balancer_quota',
'default_listener_quota',
'default_member_quota',
'default_pool_quota',
'default_health_monitor_quota',
]) | DEFAULT_OPTIONS
assert 'audit' in octavia_conf
assert set(octavia_conf.items('audit').keys()) == set([
'enabled',
'audit_map_file',
'ignore_req_list',
]) | DEFAULT_OPTIONS
assert 'audit_middleware_notifications' in octavia_conf
assert set(octavia_conf.items('audit_middleware_notifications').keys()) == set([
'use_oslo_messaging',
'driver',
'topics',
]) | DEFAULT_OPTIONS
assert 'driver_agent' in octavia_conf
assert set(octavia_conf.items('driver_agent').keys()) == set([
'status_socket_path',
'stats_socket_path',
'get_socket_path',
'status_request_timeout',
'status_max_processes',
'stats_request_timeout',
'stats_max_processes',
'max_process_warning_percent',
'provider_agent_shutdown_timeout',
'enabled_provider_agents',
]) | DEFAULT_OPTIONS
def test_doc_examples():
env = {
'octavia_conf': OctaviaConf(context_wrap(CONF_FILE)),
}
failed, total = doctest.testmod(octavia_module, globs=env)
assert failed == 0
|
1649630
|
from core.models import Person
from ..models import Enrollment
def enrollment_event_box_context(request, event):
enrollment = None
is_enrollment_admin = False
if request.user.is_authenticated:
is_enrollment_admin = event.enrollment_event_meta.is_user_admin(request.user)
try:
person = request.user.person
enrollment = Enrollment.objects.get(event=event, person=person, state__in=[
'NEW',
'ACCEPTED',
])
except (Person.DoesNotExist, Enrollment.DoesNotExist):
pass
return dict(
enrollment=enrollment,
is_enrollment_admin=is_enrollment_admin,
)
|
1649683
|
from troposphere import (
Ref, Output
)
from stacker.blueprints.base import Blueprint
from stacker.util import load_object_from_string
class GenericResourceCreator(Blueprint):
""" Generic Blueprint for creating a resource
Example config - this would create a stack with a single resource in it,
an ec2.Volume resource:
- name: generic-resource-volume
class_path: blueprints.generic.GenericResourceCreator
variables:
Class: ec2.Volume
Output: VolumeId
Properties:
VolumeType: gp2
Size: 5
Encrypted: true
AvailabilityZone: us-east-1b
"""
VARIABLES = {
'Class':
{'type': str,
'description': 'The troposphere class to create, '
'e.g.: ec2.Volume'},
'Output':
{'type': str,
'description': 'The output field that should be created, '
'e.g.: VolumeId'},
'Properties':
{'type': dict,
'description': 'The list of properties to use for the '
'Troposphere class'},
}
def add_cfn_description(self):
""" Boilerplate for CFN Template
*** NOTE *** Template Version Reminder
        Make sure you bump up the template version number above if submitting
        updates to the repo. This is the only way we can tell which version of
        a template is in place on a running resource.
"""
template = self.template
template.add_version('2010-09-09')
template.add_description('Generic Resource Creator - 1.0.0')
def setup_resource(self):
""" Setting Up Resource """
template = self.template
variables = self.get_variables()
tclass = variables['Class']
tprops = variables['Properties']
output = variables['Output']
klass = load_object_from_string('troposphere.' + tclass)
instance = klass.from_dict('ResourceRefName', tprops)
template.add_resource(instance)
template.add_output(Output(
output,
Description="A reference to the object created in this blueprint",
Value=Ref(instance)
))
def create_template(self):
""" Create the CFN template """
self.add_cfn_description()
self.setup_resource()
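# Hypothetical standalone sketch (not part of the blueprint API): shows how the
# 'Class' and 'Properties' values from the docstring example above map onto a
# troposphere resource, mirroring what setup_resource() does.
if __name__ == '__main__':
    from troposphere import Template
    klass = load_object_from_string('troposphere.ec2.Volume')
    volume = klass.from_dict('ResourceRefName', {
        'VolumeType': 'gp2',
        'Size': 5,
        'Encrypted': True,
        'AvailabilityZone': 'us-east-1b',
    })
    template = Template()
    template.add_resource(volume)
    template.add_output(Output('VolumeId', Value=Ref(volume)))
    print(template.to_json())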
|
1649700
|
from pyparrot.Anafi import Anafi
from pyparrot.DroneVisionGUI import DroneVisionGUI
from pyparrot.Model import Model
import cv2
WRITE_IMAGES = False
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
img = self.vision.get_latest_valid_picture()
if img is not None and WRITE_IMAGES:
cv2.imwrite(f"test_image_{self.index:06d}", img)
self.index += 1
def demo_anafi_user_vision(anafi_vision, args):
"""
    Demo user code to run with the run button for the Anafi
    :param args: tuple of user arguments; args[0] is the Anafi instance
:return:
"""
anafi = args[0]
print("Sleeping for 15 seconds, move Anafi around to test vision")
anafi.smart_sleep(15)
print("Closing video stream")
anafi.close_video()
anafi.smart_sleep(5)
print("Disconnecting Anafi")
anafi.disconnect()
if __name__ == "__main__":
anafi = Anafi()
print("Connecting to Anafi...")
if anafi.connect(num_retries=3):
print("Connected to Anafi")
# Update state info
anafi.smart_sleep(1)
anafi.ask_for_state_update()
anafi.smart_sleep(1)
print("Preparing to open video stream")
anafi_vision = DroneVisionGUI(
anafi,
Model.ANAFI,
buffer_size=200,
user_code_to_run=demo_anafi_user_vision,
user_args=(anafi,),
)
user_vision = UserVision(anafi_vision)
anafi_vision.set_user_callback_function(
user_vision.save_pictures, user_callback_args=None
)
print("Opening video stream")
anafi_vision.open_video()
|
1649723
|
class Config(object):
""" Model Configuration
[use_img_feat] options control how we use image features
None not using image features (default)
"concat_bf_lstm" concatenate image feature before LSTM
"concat_af_lstm" concatenate image feature after LSTM
"only_img" image feature only
[combine_typ] how do we combine context feature with candidate feature
"bilinpool" using binlinear pooling (default)
"concat" concatenate features directly
    [cls_hidden]     number of hidden layers for the classifier (all with size 256)
"""
learning_rate = 0.001
learning_rate_decay = 0.9
max_epoch = 30
grad_clip = 1.0
num_layers = 1
num_steps = 15
hidden_size = 512
dropout_prob = 0.5
batch_size = 100
vocab_size = 10004
embedding_size = 300
num_input = 2
use_lstm = True
# How to use Image Feature :
# None | 'concat_bf_lstm' | 'concat_af_lstm' | 'only_img'
    use_img_feat = 'concat_af_lstm'
# How to combine context feature:
# 'bilinpool' | 'concat'
combine_typ = 'concat'
# 0 for basic linear classifier
cls_hidden = 0
    use_residual = False  # Whether to use residual connections in the LSTM
    use_random_human = True  # Whether to use random human caption transformations
    use_random_word = True  # Whether to use random word replacement
    use_word_permutation = True  # Whether to use random word permutations
    use_mc_samples = True  # Whether to use Monte Carlo sampled captions
def set_no_da(config):
    # Disable data augmentation during training.
config.use_random_human = False
config.use_random_word = False
config.use_word_permutation = False
config.use_mc_samples = False
return config
def config_model_coco(config, model_architecture):
config.num_layers = 1 # using 1 LSTM layer
# Linear models
if model_architecture == 'concat_no_img_1_512_0':
config.use_img_feat = None
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 0
elif model_architecture == 'concat_img_1_512_0':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 0
elif model_architecture == 'concat_only_img_1_512_0':
config.use_img_feat = 'only_img'
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 0
elif model_architecture == 'concat_img_1_512_0_noda':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 0
config = set_no_da(config)
# Non-linear models with Compact Bilinear Pooling
elif model_architecture == 'bilinear_img_1_512_0':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.hidden_size = 512
config.cls_hidden = 0
elif model_architecture == 'bilinear_no_img_1_512_0':
config.use_img_feat = None
config.combine_typ = 'bilinpool'
config.hidden_size = 512
config.cls_hidden = 0
elif model_architecture == 'bilinear_only_img_1_512_0':
config.use_img_feat = 'only_img'
config.combine_typ = 'bilinpool'
config.hidden_size = 512
config.cls_hidden = 0
elif model_architecture == 'bilinear_img_1_512_0_noda':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.hidden_size = 512
config.cls_hidden = 0
config = set_no_da(config)
# Non-linear models with MLP
elif model_architecture == 'mlp_1_img_1_512_0':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 1
elif model_architecture == 'mlp_1_no_img_1_512_0':
config.use_img_feat = None
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 1
elif model_architecture == 'mlp_1_only_img_1_512_0':
config.use_img_feat = 'only_img'
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 1
elif model_architecture == 'mlp_1_img_1_512_0_noda':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'concat'
config.hidden_size = 512
config.cls_hidden = 1
config = set_no_da(config)
else:
raise Exception("Invalid architecture name:%s"%model_architecture)
return config
def config_model_flickr(config, model_architecture):
config.use_random_human = True
config.use_random_word = False
config.use_word_permutation = False
config.use_mc_samples = False
config.batch_size = 50
config.max_epoch = 100
config.learning_rate_decay = 0.98
config.learning_rate = 0.001
config.batch_size = 100
config.test_batch_size = 15000
config.vocab_size = 3441 # Without lemmatization
if model_architecture == 'baseline':
return config
if model_architecture == 'baseline_mlp':
config.use_img_feat = None
config.combine_typ = 'concat'
config.cls_hidden = 1
return config
if model_architecture == 'bilinear':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 1
config.cls_hidden = 0
return config
if model_architecture == 'bilinear_moreLSTM':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 0
return config
if model_architecture == 'bilinear_clf_moreLSTM':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
return config
if model_architecture == 'bilinear_sm':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 1
config.cls_hidden = 0
config.hidden_size = 128
return config
if model_architecture == 'bilinear_moreLSTM_sm':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 0
config.hidden_size = 128
return config
if model_architecture == 'bilinear_clf_moreLSTM_sm':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
return config
if model_architecture == 'bilinear_bilinear':
config.use_img_feat = 'bilinpool'
config.combine_typ = 'bilinpool'
config.num_layers = 1
config.cls_hidden = 0
config.hidden_size = 128
return config
# Different Dropout
if model_architecture == 'bilinear_clf_moreLSTM_dropout0.3':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.dropout_prob = 0.3
return config
if model_architecture == 'bilinear_clf_moreLSTM_dropout0.1':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
        config.dropout_prob = 0.1
return config
# Turn the learning rate
if model_architecture == 'bilinear_clf_moreLSTM_lr0.001':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.learning_rate = 0.001
return config
if model_architecture == 'bilinear_clf_moreLSTM_lr0.002':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.learning_rate = 0.002
return config
if model_architecture == 'bilinear_clf_moreLSTM_lr0.0008':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.learning_rate = 0.0008
return config
if model_architecture == 'bilinear_clf_moreLSTM_lr0.0005':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.learning_rate = 0.0005
return config
if model_architecture == 'bilinear_clf_moreLSTM_lr0.0002':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.learning_rate = 0.0002
return config
if model_architecture == 'bilinear_clf_moreLSTM_lr0.0001':
config.use_img_feat = 'concat_af_lstm'
config.combine_typ = 'bilinpool'
config.num_layers = 2
config.cls_hidden = 1
config.hidden_size = 128
config.learning_rate = 0.0001
return config
raise Exception("%s not found"%model_architecture)
|
1649770
|
import codecs, ssl, chardet
from urllib.request import urlopen, Request
db = 'db.txt'
def get(src):
with urlopen(Request(src, None, {'User-Agent': ''}), context=ssl._create_unverified_context()) as site:
text = site.read()
with codecs.open(db, 'w', 'utf-8') as file:
print(text.decode(chardet.detect(text)['encoding']), file=file)
if __name__ == '__main__':
get(input())
print('OK')
|
1649779
|
from typing import List
from typing import Union
import pystac
from gcsfs import GCSFileSystem
from satextractor.models import ExtractionTask
from satextractor.models import Tile
from satextractor.scheduler import create_tasks_by_splits
def get_scheduler(name, **kwargs):
return eval(name)
def gcp_schedule(
tiles: List[Tile],
split_m: int,
item_collection: Union[str, pystac.ItemCollection],
constellations: List[str],
bands: List[str] = None,
interval: int = 1,
n_jobs: int = -1,
verbose: int = 0,
overwrite: bool = False,
storage_path: str = None,
credentials=None,
**kwargs,
) -> List[ExtractionTask]:
fs = GCSFileSystem(token=credentials)
return create_tasks_by_splits(
tiles,
split_m,
item_collection,
constellations,
bands,
interval,
n_jobs,
verbose,
overwrite,
storage_path,
fs.get_mapper,
)
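# Usage sketch (assumed): get_scheduler() resolves the callable named by its
# first argument via eval(), so "gcp_schedule" returns the function above, e.g.
#   scheduler = get_scheduler("gcp_schedule")
#   tasks = scheduler(tiles, split_m, item_collection, constellations,
#                     storage_path=storage_path, credentials=credentials)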
|
1649789
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
from collections import namedtuple
import numpy as np
import random
from ray.rllib.agents.trainer import Trainer
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.policy.policy import Policy
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.annotations import override
from ray.tune.logger import pretty_print
from mprl.rl.envs.opnspl.poker_br_policy import PokerOracleBestResponsePolicy
from mprl.rl.common.stratego_model import STRATEGO_MODEL
from mprl.rl.common.stratego_preprocessor import STRATEGO_PREPROCESSOR
from mprl.rl.ppo.ppo_custom_eval_trainer import PPOCustomEvalTrainer
from mprl.rl.ppo.ppo_stratego_model_policy import PPOStrategoModelTFPolicy
from mprl.rl.common.util import numpy_unpack_obs
from mprl.rl.envs.opnspl.poker_multiagent_env import POKER_ENV
from mprl.rl.envs.opnspl.measure_exploitability_eval_callback import openspiel_policy_from_nonlstm_rllib_policy
from mprl.rl.envs.opnspl.util import policy_to_dict_but_we_can_actually_use_it
from mprl.rl.envs.opnspl.poker_multiagent_env import PokerMultiAgentEnv
from open_spiel.python.policy import tabular_policy_from_policy
from open_spiel.python import policy
import pyspiel
tf = try_import_tf()
RL_BR_POLICY = "rl_br_policy"
ORACLE_BR_POLICY = "oracle_br_policy"
EXPLOIT_POLICY = "exploit_policy"
# Used to return tuple actions as a list of batches per tuple element
TupleActions = namedtuple("TupleActions", ["batches"])
POLICY_TARGETS = "policy_targets"
OBSERVATION = 'observation'
VALID_ACTIONS_MASK = 'valid_actions_mask'
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
"""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
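# Quick sanity check (values rounded): softmax(np.array([1.0, 2.0, 3.0]))
# gives approximately [0.090, 0.245, 0.665]; the result sums to 1 and
# favors the largest score.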
class PokerOpenSpeilPolicy(Policy):
@override(Policy)
def __init__(self, observation_space, action_space, config):
Policy.__init__(self, observation_space=observation_space, action_space=action_space, config=config)
if config["custom_preprocessor"]:
self.preprocessor = ModelCatalog.get_preprocessor_for_space(
observation_space=self.observation_space.original_space,
options={"custom_preprocessor": config["custom_preprocessor"]})
else:
raise ValueError("Custom preprocessor for PokerCFRPolicy needs to be specified on its passed config.")
env_id = config['env']
assert env_id == POKER_ENV
self.policy_dict = None
def set_policy_dict(self, policy_dict):
self.policy_dict = policy_dict
def _get_action_probs_for_infoset(self, infoset):
action_probs = np.zeros(shape=(self.action_space.n,), dtype=np.float32)
policy_lookup_val = self.policy_dict[str(np.asarray(infoset, dtype=np.float32).tolist())]
for action, prob in policy_lookup_val:
action_probs[action] = prob
return action_probs
@override(Policy)
def compute_actions(self,
obs_batch,
state_batches,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
**kwargs):
"""Compute actions for the current policy.
Arguments:
obs_batch (np.ndarray): batch of observations
state_batches (list): list of RNN state input batches, if any
prev_action_batch (np.ndarray): batch of previous action values
prev_reward_batch (np.ndarray): batch of previous rewards
info_batch (info): batch of info objects
episodes (list): MultiAgentEpisode for each obs in obs_batch.
This provides access to all of the internal episode state,
which may be useful for model-based or multiagent algorithms.
kwargs: forward compatibility placeholder
Returns:
actions (np.ndarray): batch of output actions, with shape like
[BATCH_SIZE, ACTION_SHAPE].
state_outs (list): list of RNN state output batches, if any, with
shape like [STATE_SIZE, BATCH_SIZE].
info (dict): dictionary of extra feature batches, if any, with
shape like {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
"""
obs_batch = numpy_unpack_obs(obs=np.asarray(obs_batch), space=self.observation_space.original_space,
preprocessor=self.preprocessor)
info_states = obs_batch["partial_observation"]
valid_actions = obs_batch['valid_actions_mask']
actions = []
policy_probs = []
for info_state, valid_mask in zip(info_states, valid_actions):
if self.policy_dict is None:
action_probs = valid_mask.copy() / sum(valid_mask)
else:
action_probs = self._get_action_probs_for_infoset(info_state)
action = np.random.choice(range(self.action_space.n), p=action_probs)
assert valid_mask[action] == 1.0
actions.append(action)
policy_probs.append(action_probs)
return actions, [], {POLICY_TARGETS: np.asarray(policy_probs)}
def compute_gradients(self, postprocessed_batch):
"""Computes gradients against a batch of experiences.
Either this or learn_on_batch() must be implemented by subclasses.
Returns:
grads (list): List of gradient output values
info (dict): Extra policy-specific values
"""
pass
def apply_gradients(self, gradients):
"""Applies previously computed gradients.
Either this or learn_on_batch() must be implemented by subclasses.
"""
pass
def get_weights(self):
"""Returns model weights.
Returns:
weights (obj): Serializable copy or view of model weights
"""
return None
def set_weights(self, weights):
"""Sets model weights.
Arguments:
weights (obj): Serializable copy or view of model weights
"""
pass
def get_initial_state(self):
"""Returns initial RNN state for the current policy."""
return []
def get_state(self):
"""Saves all local state.
Returns:
state (obj): Serialized local state.
"""
return self.get_weights()
def set_state(self, state):
"""Restores all local state.
Arguments:
state (obj): Serialized local state.
"""
self.set_weights(state)
def on_global_var_update(self, global_vars):
"""Called on an update to global vars.
Arguments:
global_vars (dict): Global variables broadcast from the driver.
"""
pass
def export_model(self, export_dir):
"""Export Policy to local directory for serving.
Arguments:
export_dir (str): Local writable directory.
"""
raise NotImplementedError
def export_checkpoint(self, export_dir):
"""Export Policy checkpoint to local directory.
Argument:
export_dir (str): Local writable directory.
"""
raise NotImplementedError
def get_openspeil_format_rl_br_policy(game_name, br_player_id, policy_to_exploit, policy_to_exploit_player_id):
ray.init(local_mode=True, ignore_reinit_error=True)
poker_game_version = game_name
observation_mode = "partially_observable"
poker_env_config = {
'version': poker_game_version,
'fixed_players': True
}
make_env_fn = lambda env_config: PokerMultiAgentEnv(env_config)
temp_env = make_env_fn(poker_env_config)
obs_space = temp_env.observation_space
act_space = temp_env.action_space
model_config = {
# === Options for custom models ===
# Name of a custom preprocessor to use
"custom_preprocessor": STRATEGO_PREPROCESSOR,
# Name of a custom model to use
"custom_model": STRATEGO_MODEL,
"custom_options": {
"mask_invalid_actions": True,
"observation_mode": observation_mode,
"q_fn": False
},
}
def train_policy_mapping_fn(agent_id):
if agent_id == br_player_id:
# this is just to quickly check that we're matching the Oracle BR by having it
# also play some games and report win stats too
            # TODO: you can remove this if-statement if you don't care about verifying against the oracle BR
if random.random() < 0.1:
return ORACLE_BR_POLICY
return RL_BR_POLICY
elif agent_id == policy_to_exploit_player_id:
return EXPLOIT_POLICY
else:
raise ValueError(f"The env requested a policy for a player ID of {agent_id} "
f"but the BR policy has a player ID of {br_player_id} "
f"and the exploit policy has player ID of {policy_to_exploit_player_id}")
trainer_config = {
"log_level": "INFO",
"num_workers": 0, # 0 means a single worker instance in the same process as the optimizer
"memory_per_worker": 1419430400,
"num_gpus": 0, # (GPUs for training) not using gpus for anything by default
"num_gpus_per_worker": 0, # (GPUs per experience gathering worker process, can be a fraction)
"num_envs_per_worker": 1,
"env": POKER_ENV,
"env_config": poker_env_config,
"multiagent": {
"policies": {
RL_BR_POLICY: (PPOStrategoModelTFPolicy, obs_space, act_space, {
# the config dicts in these "policies" override any non-policy-specific params
'model': model_config,
"lr": 0.001,
}),
                # TODO: you can remove the ORACLE BR policy here if you don't want to verify against it
# (there are two other TODO's in this file with Oracle BR stuff you can remove)
ORACLE_BR_POLICY: (PokerOracleBestResponsePolicy, obs_space, act_space, {
'custom_preprocessor': STRATEGO_PREPROCESSOR,
}),
EXPLOIT_POLICY: (PokerOpenSpeilPolicy, obs_space, act_space, {
'custom_preprocessor': STRATEGO_PREPROCESSOR,
}),
},
"policy_mapping_fn": train_policy_mapping_fn,
"policies_to_train": [RL_BR_POLICY],
},
"metrics_smoothing_episodes": 1000, # all reported RLLib metrics are averaged over this size episode window
"gamma": 1.0, # discount
"num_sgd_iter": 10, # train over train batch this many times each train() call
"sgd_minibatch_size": 128, #break train batch in to this size minibatches
"train_batch_size": 500,
"sample_batch_size": 10, # each worker returns chunks of this size (PPO continues gathering exp until train_batch_size is gathered in total among all policies)
"simple_optimizer": True, # non-simple optimizer does multi-gpu/preloading fancy stuff
"model": {
"conv_filters": [],
"fcnet_hiddens": [40, 40, 40], # poker network size here
},
}
trainer_class = PPOCustomEvalTrainer
trainer: Trainer = trainer_class(config=trainer_config)
# For technical reasons (can't pickle certain things),
# I have to set the policy probs for openspiel-based exploit policy here
def set_openspeil_exploit_policy_probs(worker):
game = pyspiel.load_game(game_name)
worker.policy_map[EXPLOIT_POLICY].set_policy_dict(
policy_to_dict_but_we_can_actually_use_it(player_policy=policy_to_exploit,
game=game,
player_id=policy_to_exploit_player_id))
trainer.workers.foreach_worker(set_openspeil_exploit_policy_probs)
###################
# For technical reasons (can't pickle certain things),
# I have to set the policy probs for openspiel-based BR policy here
    # TODO: you can remove this chunk of logic if you remove the other two bits of Oracle BR code in earlier lines
local_br_policy = trainer.workers.local_worker().policy_map[ORACLE_BR_POLICY]
local_exploit_rllib_policy = trainer.workers.local_worker().policy_map[EXPLOIT_POLICY]
br_policy_probs_dict = local_br_policy.compute_best_response(policy_to_exploit=local_exploit_rllib_policy,
br_only_as_player_id=br_player_id)
def set_openspeil_oracle_br_policy_probs(worker):
worker.policy_map[ORACLE_BR_POLICY].set_policy_dict(br_policy_probs_dict)
trainer.workers.foreach_worker(set_openspeil_oracle_br_policy_probs)
####################
iterations = 100
for it in range(1, iterations + 1):
result = trainer.train()
print(f"Iteration {it} out of {iterations}")
print(pretty_print(result))
game = pyspiel.load_game(game_name)
open_spiel_policy_from_callable = openspiel_policy_from_nonlstm_rllib_policy(
openspiel_game=game, poker_game_version=poker_game_version,
rllib_policy=trainer.workers.local_worker().policy_map[RL_BR_POLICY])
return tabular_policy_from_policy(game=game, policy=open_spiel_policy_from_callable)
if __name__ == '__main__':
game_name = "kuhn_poker"
game = pyspiel.load_game(game_name)
tabular_policy = policy.TabularPolicy(game)
    # rl_br_policy should have the same interface as an OpenSpiel BR policy from something like
# open_spiel.python.algorithms.best_response.BestResponsePolicy
rl_br_policy = get_openspeil_format_rl_br_policy(game_name=game_name,
br_player_id=0,
policy_to_exploit_player_id=1,
policy_to_exploit=tabular_policy)
|
1649866
|
from django.shortcuts import get_object_or_404
from facebook import GraphAPI, GraphAPIError
from raven.contrib.django.raven_compat.models import client
from canvas.exceptions import InvalidFacebookAccessToken
from canvas.templatetags.jinja_base import render_jinja_to_string
from canvas.view_guards import require_staff, require_user
from drawquest.api_decorators import api_decorator
from drawquest.apps.twitter.models import Twitter, TwitterError, TwitterDuplicateStatusError
urlpatterns = []
api = api_decorator(urlpatterns)
@api('share_web_profile')
@require_user
def share_web_profile(request, message,
twitter_access_token=None, twitter_access_token_secret=None,
facebook_access_token=None):
if twitter_access_token is not None and twitter_access_token_secret is not None:
try:
Twitter(twitter_access_token, twitter_access_token_secret).tweet(message)
except TwitterDuplicateStatusError as e:
pass
except TwitterError as e:
client.captureException()
if facebook_access_token:
graph = GraphAPI(facebook_access_token)
try:
graph.put_object('me', 'feed', message=message)
except GraphAPIError:
raise InvalidFacebookAccessToken("Invalid Facebook access token, please re-auth with Facebook.")
except IOError:
client.captureException()
|
1649881
|
import numpy as np
from itertools import product
import depthai as dai
from math import gcd
from pathlib import Path
from FPS import FPS, now
import cv2
import os, sys, re
SCRIPT_DIR = Path(__file__).resolve().parent
DEFAULT_YUNET_MODEL = str(SCRIPT_DIR / "models/face_detection_yunet_180x320_sh4.blob")
def find_isp_scale_params(size, is_height=True):
"""
    Find the closest valid size to 'size' and the corresponding parameters to pass to setIspScale()
This function is useful to work around a bug in depthai where ImageManip is scrambling images that have an invalid size
is_height : boolean that indicates if the value is the height or the width of the image
Returns: valid size, (numerator, denominator)
"""
# We want size >= 288
if size < 288:
size = 288
    # We are looking for sizes that are multiples of 16 and can be written as
    # reference * n/d, where n <= 16 and d <= 63 (the setIspScale() constraints)
if is_height:
reference = 1080
other = 1920
else:
reference = 1920
other = 1080
size_candidates = {}
for s in range(16,reference,16):
f = gcd(reference, s)
n = s//f
d = reference//f
if n <= 16 and d <= 63 and int(round(other * n / d) % 2 == 0):
size_candidates[s] = (n, d)
    # Which candidate size is closest to 'size'?
min_dist = -1
for s in size_candidates:
dist = abs(size - s)
if min_dist == -1:
min_dist = dist
candidate = s
else:
if dist > min_dist: break
candidate = s
min_dist = dist
return candidate, size_candidates[candidate]
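# Usage sketch (mirrors the YuNet constructor below): derive the ISP scale and
# the resulting internal camera frame size from a target frame height, e.g.
#   width, scale_nd = find_isp_scale_params(internal_frame_height * 1920 / 1080,
#                                           is_height=False)
#   img_h = int(round(1080 * scale_nd[0] / scale_nd[1]))
#   img_w = int(round(1920 * scale_nd[0] / scale_nd[1]))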
class YuNet:
"""
YuNet Face Detector : https://github.com/opencv/opencv_zoo/tree/dev/models/face_detection_yunet
Arguments:
- model: path to Yunet blob
- model_resolution: None or string "HxW" where H and W are the Yunet input resolution (Height, Width)
If None, the resolution is inferred from the model path "face_detection_yunet_HxW.blob"
- input_src: frame source,
- "rgb" or None: OAK* internal color camera,
- "rgb_laconic": same as "rgb" but without sending the frames to the host,
- a file path of an image or a video,
- an integer (eg 0) for a webcam id,
- conf_threshold: detection score threshold [0..1],
- nms_threshold: Non Maximal Suppression threshold [0..1],
- internal_fps : when using the internal color camera as input source, set its FPS to this value (calling setFps()).
- internal_frame_height : when using the internal color camera, set the frame height (calling setIspScale()).
The width is calculated accordingly to height and depends on value of 'crop'
- stats : boolean, when True, display some statistics when exiting.
- trace: boolean, when True print some debug messages
"""
def __init__(self,
model = str(DEFAULT_YUNET_MODEL),
model_resolution=None,
input_src=None,
conf_threshold=0.6,
nms_threshold=0.3,
top_k = 50,
internal_fps=50,
internal_frame_height=640,
stats=False,
trace=False,
):
self.model = model
if not os.path.isfile(model):
print(f"Model path '{model}' does not exist !!!")
sys.exit()
        if model_resolution is None:
            # Try to infer the resolution from the model path
            model_resolution = model
        match = re.search(r'.*?(\d+)x(\d+).*', model_resolution)
        if not match:
            print(f"Impossible to infer the model input resolution from '{model_resolution}' !!!")
sys.exit()
self.nn_input_w = int(match.group(2))
self.nn_input_h = int(match.group(1))
print(f"Model : {self.model} - Input resolution: {self.nn_input_h}x{self.nn_input_w}")
self.internal_fps = internal_fps
self.conf_threshold = conf_threshold
self.nms_threshold = nms_threshold
self.top_k = top_k
self.stats = stats
self.trace = trace
self.min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
self.steps = [8, 16, 32, 64]
self.variance = [0.1, 0.2]
# Generate priors
self.prior_gen()
self.device = dai.Device()
if input_src is None or input_src == "rgb" or input_src == "rgb_laconic":
self.input_type = "rgb" # OAK* internal color camera
self.laconic = input_src == "rgb_laconic" # Camera frames are not sent to the host
self.video_fps = self.internal_fps # Used when saving the output in a video file. Should be close to the real fps
width, self.scale_nd = find_isp_scale_params(internal_frame_height * 1920 / 1080, is_height=False)
self.img_h = int(round(1080 * self.scale_nd[0] / self.scale_nd[1]))
self.img_w = int(round(1920 * self.scale_nd[0] / self.scale_nd[1]))
print(f"Internal camera image size: {self.img_w} x {self.img_h}")
elif input_src.endswith('.jpg') or input_src.endswith('.png') :
self.input_type= "image"
self.img = cv2.imread(input_src)
self.video_fps = 25
self.img_h, self.img_w = self.img.shape[:2]
else:
self.input_type = "video"
if input_src.isdigit():
input_type = "webcam"
input_src = int(input_src)
self.cap = cv2.VideoCapture(input_src)
self.video_fps = int(self.cap.get(cv2.CAP_PROP_FPS))
self.img_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.img_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("Video FPS:", self.video_fps)
# We want to keep aspect ratio of the input images
# So we may need to pad the images before feeding them to the model
# 'padded_size' is the size of the image once padded.
# Note that the padding when used is not applied on both sides (top and bottom,
        # or left and right) but only on the side opposite to the origin (bottom or right).
# It makes calculations easier.
self.iwnh_ihnw = self.img_w * self.nn_input_h / (self.img_h * self.nn_input_w)
if self.iwnh_ihnw >= 1:
self.padded_size = np.array((self.img_w, self.img_h * self.iwnh_ihnw)).astype(int)
else:
self.padded_size = np.array((self.img_w / self.iwnh_ihnw, self.img_h)).astype(int)
print(f"Source image size: {self.img_w} x {self.img_h}")
print(f"Padded image size: {self.padded_size[0]} x {self.padded_size[1]}")
# Define and start pipeline
usb_speed = self.device.getUsbSpeed()
self.device.startPipeline(self.create_pipeline())
print(f"Pipeline started - USB speed: {str(usb_speed).split('.')[-1]}")
# Define data queues
if self.input_type == "rgb":
if not self.laconic:
self.q_video = self.device.getOutputQueue(name="cam_out", maxSize=1, blocking=False)
if self.trace:
self.q_manip_out = self.device.getOutputQueue(name="manip_out", maxSize=1, blocking=False)
else:
self.q_nn_in = self.device.getInputQueue(name="nn_in")
self.q_nn_out = self.device.getOutputQueue(name="nn_out", maxSize=4, blocking=False)
self.fps = FPS()
self.glob_rtrip_time = 0
self.glob_posprocessing_time = 0
def create_pipeline(self):
print("Creating pipeline...")
# Start defining a pipeline
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version = dai.OpenVINO.Version.VERSION_2021_4)
if self.input_type == "rgb":
# ColorCamera
print("Creating Color Camera...")
cam = pipeline.createColorCamera()
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
cam.setInterleaved(False)
cam.setIspScale(self.scale_nd[0], self.scale_nd[1])
cam.setFps(self.internal_fps)
cam.setPreviewSize(self.img_w, self.img_h)
if not self.laconic:
cam_out = pipeline.createXLinkOut()
cam_out.setStreamName("cam_out")
cam_out.input.setQueueSize(1)
cam_out.input.setBlocking(False)
cam.video.link(cam_out.input)
# The frame is padded to have the same ratio width/height
# as the model input, and resized to the model input resolution
print("Creating Image Manip node...")
manip = pipeline.createImageManip()
manip.setMaxOutputFrameSize(self.nn_input_w*self.nn_input_h*3)
manip.inputImage.setQueueSize(1)
manip.inputImage.setBlocking(False)
points = [
[0, 0],
[self.padded_size[0], 0],
[self.padded_size[0], self.padded_size[1]],
[0, self.padded_size[1]]]
point2fList = []
for p in points:
pt = dai.Point2f()
pt.x, pt.y = p[0], p[1]
point2fList.append(pt)
manip.initialConfig.setWarpTransformFourPoints(point2fList, False)
manip.initialConfig.setResize(self.nn_input_w, self.nn_input_h)
cam.preview.link(manip.inputImage)
# For debugging
if self.trace:
manip_out = pipeline.createXLinkOut()
manip_out.setStreamName("manip_out")
manip.out.link(manip_out.input)
# Define YUNET model
print("Creating YUNET Neural Network...")
nn = pipeline.createNeuralNetwork()
nn.setBlobPath(self.model)
if self.input_type == "rgb":
manip.out.link(nn.input)
else:
nn_in = pipeline.createXLinkIn()
nn_in.setStreamName("nn_in")
nn_in.out.link(nn.input)
# YUNET output
nn_out = pipeline.createXLinkOut()
nn_out.setStreamName("nn_out")
nn.out.link(nn_out.input)
print("Pipeline created.")
return pipeline
def prior_gen(self):
w, h = self.nn_input_w, self.nn_input_h
feature_map_2th = [int(int((h + 1) / 2) / 2),
int(int((w + 1) / 2) / 2)]
feature_map_3th = [int(feature_map_2th[0] / 2),
int(feature_map_2th[1] / 2)]
feature_map_4th = [int(feature_map_3th[0] / 2),
int(feature_map_3th[1] / 2)]
feature_map_5th = [int(feature_map_4th[0] / 2),
int(feature_map_4th[1] / 2)]
feature_map_6th = [int(feature_map_5th[0] / 2),
int(feature_map_5th[1] / 2)]
feature_maps = [feature_map_3th, feature_map_4th,
feature_map_5th, feature_map_6th]
priors = []
for k, f in enumerate(feature_maps):
min_sizes = self.min_sizes[k]
for i, j in product(range(f[0]), range(f[1])): # i->h, j->w
for min_size in min_sizes:
s_kx = min_size / w
s_ky = min_size / h
cx = (j + 0.5) * self.steps[k] / w
cy = (i + 0.5) * self.steps[k] / h
priors.append([cx, cy, s_kx, s_ky])
print("Priors length =", len(priors))
self.priors = np.array(priors, dtype=np.float32)
def decode(self, inference):
# print(inference.getAllLayerNames())
loc = np.array(inference.getLayerFp16("loc"), dtype=np.float32).reshape(-1, 14)
conf = np.array(inference.getLayerFp16("conf"), dtype=np.float32).reshape(-1, 2)
iou_scores = np.array(inference.getLayerFp16("iou"), dtype=np.float32)
# get score
cls_scores = conf[:, 1]
# clamp
idx = np.where(iou_scores < 0.)
iou_scores[idx] = 0.
idx = np.where(iou_scores > 1.)
iou_scores[idx] = 1.
scores = np.sqrt(cls_scores * iou_scores)
scores = scores[:, np.newaxis]
# get bboxes
bboxes = np.hstack((
(self.priors[:, 0:2] + loc[:, 0:2] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
(self.priors[:, 2:4] * np.exp(loc[:, 2:4] * self.variance)) * self.padded_size
))
# (x_c, y_c, w, h) -> (x1, y1, w, h)
bboxes[:, 0:2] -= bboxes[:, 2:4] / 2
# get landmarks
landmarks = np.hstack((
(self.priors[:, 0:2] + loc[:, 4: 6] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
(self.priors[:, 0:2] + loc[:, 6: 8] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
(self.priors[:, 0:2] + loc[:, 8:10] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
(self.priors[:, 0:2] + loc[:, 10:12] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
(self.priors[:, 0:2] + loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size
))
dets = np.hstack((bboxes, landmarks, scores))
return dets
def save_inference_to_npz(self, inference):
loc = np.array(inference.getLayerFp16("loc"), dtype=np.float32).reshape(-1, 14)
conf = np.array(inference.getLayerFp16("conf"), dtype=np.float32).reshape(-1, 2)
iou = np.array(inference.getLayerFp16("iou"), dtype=np.float32)
np.savez("models/build/yunet_output.npz", loc=loc, conf=conf, iou=iou, w=self.nn_input_w, h=self.nn_input_h)
def postprocess(self, inference):
# Decode
dets = self.decode(inference)
# NMS
keep_idx = cv2.dnn.NMSBoxes(
bboxes=dets[:, 0:4].tolist(),
scores=dets[:, -1].tolist(),
score_threshold=self.conf_threshold,
nms_threshold=self.nms_threshold,
top_k=self.top_k
) # box_num x class_num
if len(keep_idx) > 0:
dets = dets[keep_idx]
# If opencv >= 4.5.4.58, NMSBoxes returns Nx1x15
# Else, NMSBoxes returns 1x15
if len(dets.shape) > 2:
dets = np.squeeze(dets, axis=1)
return dets # [:self.keep_top_k]
else:
return np.empty(shape=(0, 15))
def next_frame(self):
"""
Return:
- frame: source input frame,
        - faces: detected faces as a 2D numpy array of dim (N, 15) with N = number of faces:
- faces[:,0:4] represents the bounding box (x,y,width,height),
- faces[:,4:14] represents the 5 facial landmarks coordinates (x,y),
            - faces[:,14] is the detection score.
"""
self.fps.update()
if self.input_type == "rgb":
if self.laconic:
frame = np.zeros((self.img_h, self.img_w, 3), dtype=np.uint8)
else:
# Read color frame from the device
in_video = self.q_video.get()
frame = in_video.getCvFrame()
else:
if self.input_type == "image":
frame = self.img.copy()
else:
ok, frame = self.cap.read()
if not ok:
return None, None
# Send color frame to the device
# The frame is padded to have the same ratio width/height
# as the model input, and resized to the model input resolution
padded = cv2.copyMakeBorder(frame,
0,
self.padded_size[1] - self.img_h,
0,
self.padded_size[0] - self.img_w,
cv2.BORDER_CONSTANT)
padded = cv2.resize(padded, (self.nn_input_w, self.nn_input_h), interpolation=cv2.INTER_AREA)
if self.trace:
cv2.imshow("NN input", padded)
frame_nn = dai.ImgFrame()
frame_nn.setTimestamp(now())
frame_nn.setWidth(self.nn_input_w)
frame_nn.setHeight(self.nn_input_h)
frame_nn.setData(padded.transpose(2, 0, 1))
self.q_nn_in.send(frame_nn)
rtrip_time = now()
# Get model inference
inference = self.q_nn_out.get()
_now = now()
if self.input_type != "rgb":
self.glob_rtrip_time += _now - rtrip_time
faces = self.postprocess(inference)
        self.glob_posprocessing_time += now() - _now
# For debugging
if self.trace and self.input_type == "rgb":
manip = self.q_manip_out.get()
manip = manip.getCvFrame()
cv2.imshow("NN input", manip)
return frame, faces
def exit(self):
self.device.close()
# Print some stats
if self.stats:
nb_frames = self.fps.nb_frames()
print(f"FPS : {self.fps.get_global():.1f} f/s (# frames = {nb_frames})")
if self.input_type != "rgb":
print(f"Round trip (send frame + get back inference result) : {self.glob_rtrip_time/nb_frames*1000:.1f} ms")
print(f"Post processing time (on the host) : {self.glob_posprocessing_time/nb_frames*1000:.1f} ms")
|
1649899
|
from gevent.pywsgi import WSGIServer
from app import app, db
db.create_all()
http_server = WSGIServer(('', 9090), app)
http_server.serve_forever()
|
1649905
|
class Instruccion:
'''This is an abstract class'''
class Imprimir(Instruccion) :
'''
    This class represents the imprimir (print) instruction.
    The imprimir instruction takes a single string as its parameter.
'''
def __init__(self, cad) :
self.cad = cad
class Mientras(Instruccion) :
'''
    This class represents the mientras (while) instruction.
    The mientras instruction takes as parameters a logical expression and the list
    of instructions to execute while the logical expression is true.
'''
def __init__(self, expLogica, instrucciones = []) :
self.expLogica = expLogica
self.instrucciones = instrucciones
class Definicion(Instruccion) :
'''
    This class represents the variable definition instruction.
    It takes as a parameter the name of the identifier to define.
'''
def __init__(self, id) :
self.id = id
class Asignacion(Instruccion) :
'''
    This class represents the variable assignment instruction.
    It takes as parameters the identifier to assign and the value to be assigned.
'''
def __init__(self, id, expNumerica) :
self.id = id
self.expNumerica = expNumerica
class If(Instruccion) :
'''
    This class represents the if instruction.
    The if instruction takes as parameters a logical expression and the list
    of instructions to execute if the logical expression is true.
'''
def __init__(self, expLogica, instrucciones = []) :
self.expLogica = expLogica
self.instrucciones = instrucciones
class IfElse(Instruccion) :
'''
    This class represents the if-else instruction.
    The if-else instruction takes as parameters a logical expression, the list of
    instructions to execute if the logical expression is true, and another list of
    instructions to execute if the logical expression is false.
'''
def __init__(self, expLogica, instrIfVerdadero = [], instrIfFalso = []) :
self.expLogica = expLogica
self.instrIfVerdadero = instrIfVerdadero
self.instrIfFalso = instrIfFalso
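# Usage sketch (hypothetical, not part of the original grammar code): how a
# parser might compose these nodes. The logical/numeric expressions are plain
# strings here as stand-ins, since the expression classes live elsewhere.
if __name__ == '__main__':
    programa = [
        Definicion('x'),
        Asignacion('x', '0'),
        Mientras('x < 3', [
            Imprimir('"iterating"'),
            Asignacion('x', 'x + 1'),
        ]),
        IfElse('x == 3', [Imprimir('"done"')], [Imprimir('"error"')]),
    ]
    print('Built %d top-level instructions' % len(programa))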
|
1649909
|
import optimizer_plots as plots
import cPickle as pickle
import copy
import json
import math
import os
import shutil
import time
import warnings
import sys
import numpy as np
import skopt
from scipy.optimize import fmin_l_bfgs_b
from six.moves import configparser
from sklearn import clone
from sklearn.externals.joblib import Parallel, delayed
from skopt.acquisition import _gaussian_acquisition, gaussian_acquisition_1D
from skopt.space import space as skopt_space
from skopt.space import Space
from skopt.utils import is_2Dlistlike, is_listlike, create_result, normalize_dimensions
from skopt.learning.gaussian_process.kernels import ConstantKernel
from skopt.learning.gaussian_process.kernels import HammingKernel
from skopt.learning.gaussian_process.kernels import Matern
from estimators import BoundedGaussianProcessRegressor
from utils import check_parameter_count, check_parameter_count_for_sample, distance, partial_dependence_valid_samples
class HyperParamOptimizer(skopt.Optimizer):
def __init__(self, hyper_param_conf, command, expdir, exp_recipe_dir, recipe, computing, exp_proposal_watch_dir=None):
base_estimator = 'GP'
self.hyper_param_conf = hyper_param_conf
self.command = command
self.expdir = expdir
self.exp_recipe_dir = exp_recipe_dir
self.recipe = recipe
self.computing = computing
self.model_cfg = configparser.ConfigParser()
self.model_cfg.read(os.path.join(self.recipe, 'model.cfg'))
# read the hyper parameter file
hyper_param_cfg = configparser.ConfigParser()
hyper_param_cfg.read(hyper_param_conf)
hyper_info = dict(hyper_param_cfg.items('info'))
self.hyper_param_names = hyper_info['hyper_params'].split(' ')
self.num_iters = int(hyper_info['num_iters'])
self.n_initial_points = int(hyper_info['n_initial_points'])
self.n_initial_points_to_start = int(hyper_info['n_initial_points_to_start'])
self.max_parallel_jobs = int(hyper_info['max_parallel_jobs'])
self.selected_segment_length = hyper_info['segment_length']
self.selected_task = hyper_info['task']
if 'adapt_hyper_param' in hyper_info:
self.adapt_param = {
'param_name': hyper_info['adapt_hyper_param'], 'param_thr': int(hyper_info['param_thr']),
'par_cnt_scheme': hyper_info['par_cnt_scheme']}
if 'parts_to_consider_for_cnt' in hyper_info:
self.adapt_param['parts_to_consider_for_cnt'] = hyper_info['parts_to_consider_for_cnt'].split(' ')
else:
self.adapt_param = None
hyper_param_dict = dict()
skopt_dims = []
for par_name in self.hyper_param_names:
par_dict = dict(hyper_param_cfg.items(par_name))
par_type = par_dict['type']
if par_type == 'Integer':
skopt_dim = skopt_space.Integer(
low=int(par_dict['min']), high=int(par_dict['max']), name=par_name)
elif par_type == 'Real':
skopt_dim = skopt_space.Real(
low=float(par_dict['min']), high=float(par_dict['max']), name=par_name)
elif par_type == 'Categorical':
skopt_dim = skopt_space.Categorical(categories=par_dict['categories'].split(' '), name=par_name)
else:
raise ValueError('Type %s is not a valid parameter type' % par_type)
hyper_param_dict[par_name] = par_dict
skopt_dims.append(skopt_dim)
self.hyper_param_dict = hyper_param_dict
self.skopt_dims = skopt_dims
self.last_result = None
# self.all_results = []
self.start_new_run_flag = True
self.iter_ind = 0
self.watch_list = dict()
self.all_dim_values = []
self.all_losses = dict()
self.n_job_running = 0
self.n_initial_points_started = 0
self.n_unsuitable_points_for_estimator = 0
self.max_n_unsuitable_points_for_estimator = 10000
self.unsuitable_runs = []
self.lost_runs = []
self.exp_proposal_watch_dir = exp_proposal_watch_dir
self.use_proposal_run = False
self.proposed_loss_runs = []
if 'artificial_high_loss' in hyper_info:
self.artificial_high_loss = float(hyper_info['artificial_high_loss'])
else:
self.artificial_high_loss = 0.170
# only ~0.25% of the points sampled in the hyperparameter space are suitable (since they lead to roughly
# the wanted number of trainable parameters)
self.acq_optimizer_kwargs = {'n_points': 4000000}
if 'debug' in expdir:
self.acq_optimizer_kwargs = {'n_points': 4000}
if base_estimator == 'boundedGP':
# Make own estimator based on Gaussian Process Regressor.
if skopt_dims is not None:
space = Space(skopt_dims)
space = Space(normalize_dimensions(space.dimensions))
n_dims = space.transformed_n_dims
is_cat = space.is_categorical
else:
raise ValueError("Expected a Space instance, not None.")
cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
# only special if *all* dimensions are categorical
if is_cat:
other_kernel = HammingKernel(length_scale=np.ones(n_dims))
else:
other_kernel = Matern(
length_scale=np.ones(n_dims),
length_scale_bounds=[(0.01, 100)] * n_dims, nu=2.5)
base_estimator = BoundedGaussianProcessRegressor(
space, self.hyper_param_names, self.adapt_param, kernel=cov_amplitude * other_kernel, normalize_y=True,
noise="gaussian", n_restarts_optimizer=2)
super(HyperParamOptimizer, self).__init__(
skopt_dims, base_estimator=base_estimator, n_initial_points=self.n_initial_points,
acq_optimizer_kwargs=self.acq_optimizer_kwargs)
def __call__(self):
print '%d Runs have finished and %d are still going. Doing %d more' \
% (self.iter_ind - self.n_unsuitable_points_for_estimator - len(self.watch_list), len(self.watch_list),
self.num_iters - (self.iter_ind - self.n_unsuitable_points_for_estimator))
while (self.iter_ind - self.n_unsuitable_points_for_estimator - len(self.watch_list)) < self.num_iters:
# check if the user proposed hyper parameter values (and potentially a corresponding validation loss)
if self.exp_proposal_watch_dir is not None:
self.use_proposal_run = self.watch_proposal_dir()
if self.start_new_run_flag or self.use_proposal_run:
# start (a) new run(s) if allowed
self.start_new_runs()
self.checkpoint()
else:
time.sleep(0.5)
# check whether any run has finished (if a run has not finished and check_jobs is True, check whether
# the job is still present in Condor)
self.check_watch_files()
# check whether a new run should be started
if \
(
self.n_job_running < self.max_parallel_jobs or
self.n_initial_points_started < self.n_initial_points_to_start
) and self.iter_ind < (self.num_iters + self.n_unsuitable_points_for_estimator):
self.start_new_run_flag = True
else:
self.start_new_run_flag = False
print 'Ran all requested %d experiments' % self.num_iters
def watch_proposal_dir(self):
files_in_dir = [
f for f in os.listdir(self.exp_proposal_watch_dir)
if os.path.isfile(os.path.join(self.exp_proposal_watch_dir, f))]
all_x_vals = []
only_pars = [True] * len(files_in_dir)
for file_ind, file_in_dir in enumerate(files_in_dir):
full_file = os.path.join(self.exp_proposal_watch_dir, file_in_dir)
with open(full_file, 'r') as fid:
tmp = fid.read().split('\n')
x_str_vals = tmp[0].split(',')
if len(x_str_vals) != len(self.skopt_dims):
raise ValueError(
'%d values were proposed for %d hyper parameters in %s' %
(len(x_str_vals), len(self.skopt_dims), file_in_dir))
x_vals = []
for ind, dim in enumerate(self.skopt_dims):
if isinstance(dim, skopt_space.Integer):
x_vals.append(int(x_str_vals[ind]))
elif isinstance(dim, skopt_space.Real):
x_vals.append(float(x_str_vals[ind]))
elif isinstance(dim, skopt_space.Categorical):
x_vals.append(x_str_vals[ind])
else:
raise ValueError('Unexpected value type')
all_x_vals.append(x_vals)
# if a loss is found as well, tell the estimator
if len(tmp) > 1 and tmp[1] != '':
y_val = float(tmp[1])
only_pars[file_ind] = False
self.iter_ind += 1
self.n_initial_points_started += 1
self.all_dim_values.append(x_vals)
self.proposed_loss_runs.append(self.iter_ind)
self.process_losses({len(self.all_dim_values) - 1: y_val})  # key is the run index of the values appended above
self.checkpoint()
os.remove(full_file)
# run one proposed hyper parameter at a time
tmp = [ind for ind, only_par in enumerate(only_pars) if only_par]
if len(tmp) > 0:
chosen_run = tmp[0]
chosen_x_vals = all_x_vals[chosen_run]
self.proposal_run_vals = chosen_x_vals
use_proposal_run = True
full_file = os.path.join(self.exp_proposal_watch_dir, files_in_dir[chosen_run])
os.remove(full_file)
else:
use_proposal_run = False
return use_proposal_run
def start_new_runs(self):
if self.use_proposal_run:
# run the proposed values by the user
dim_values, fixed_suitable_values = self.adapt_hyper_param(self.proposal_run_vals)
self.proposal_run_vals = []
if not fixed_suitable_values:
print 'Proposed values are not allowed! Ignoring them'
return
self.n_initial_points_started += 1
self.use_proposal_run = False
self.start_new_run(dim_values)
elif self.n_initial_points_started < self.n_initial_points_to_start:
# for the first n_initial_points_to_start, actively look for valid hyper param values, using an adaptation
# technique.
fixed_suitable_values = False
while not fixed_suitable_values:
dim_values = self.ask()
if self.adapt_param is None:
fixed_suitable_values = True
else:
dim_values, fixed_suitable_values = self.adapt_hyper_param(dim_values)
self.n_initial_points_started += 1
self.start_new_run(dim_values)
else:
# use the estimator to ask for proposed hyper parameters
multi_dim_values = self.ask(n_points=self.max_parallel_jobs - self.n_job_running, strategy='cl_mean')
param_thr = self.adapt_param['param_thr']
par_cnt_scheme = self.adapt_param['par_cnt_scheme']
if 'parts_to_consider_for_cnt' in self.adapt_param:
parts_to_consider_for_cnt = self.adapt_param['parts_to_consider_for_cnt']
else:
parts_to_consider_for_cnt = ['total']
multi_unsuitable_values = []
for dim_values in multi_dim_values:
suitable_values, par_cnt_dict = check_parameter_count_for_sample(
dim_values, self.hyper_param_names, param_thr, par_cnt_scheme, parts_to_consider_for_cnt,
model_cfg=self.model_cfg)
if not suitable_values:
if self.n_unsuitable_points_for_estimator < self.max_n_unsuitable_points_for_estimator:
multi_unsuitable_values.append(dim_values)
else:
# do nothing. Try again for a suitable point or wait for a valid point to be returned to update the
# estimator (whichever comes first).
# TODO: do adapt_hyper_param, otherwise we might never get out of here. And possibly do
# ask(n_points=100) to get more points
print 'Found too many unsuitable values, doing nothing.'
else:
self.start_new_run(dim_values)
if multi_unsuitable_values:
n_unsuitable_values = len(multi_unsuitable_values)
print 'Using %d unsuitable values to lie about' % n_unsuitable_values
# after telling the estimator these (useless) points it will decrease self._n_initial_points by 1 every
# time, but we don't want to consider these (useless) points as initial points
self._n_initial_points += n_unsuitable_values
# tell the estimator an artificial, high loss for the unsuitable hyper param values.
artificial_losses = [self.artificial_high_loss+0.005] * n_unsuitable_values
prev_time = time.time()
self.tell(multi_unsuitable_values, artificial_losses, fit=True)
print (time.time()-prev_time)
# increase the count for unsuitable points given to the estimator
self.n_unsuitable_points_for_estimator += n_unsuitable_values
new_iter_inds = range(self.iter_ind, self.iter_ind+n_unsuitable_values)
self.unsuitable_runs.extend(new_iter_inds)
self.iter_ind += n_unsuitable_values
self.all_dim_values.extend(multi_unsuitable_values)
# if np.mod(self.n_unsuitable_points_for_estimator, 100) == 0:
# print \
# 'Hit unsuitable point number %d.' % self.n_unsuitable_points_for_estimator
# if self.iter_ind == 0:
# # check for default values
# for par_ind, par_name in enumerate(self.hyper_param_names):
# if 'default' in self.hyper_param_dict[par_name]:
# if self.hyper_param_dict[par_name]['type'] == 'Integer':
# dim_values[par_ind] = int(self.hyper_param_dict[par_name]['default'])
# elif self.hyper_param_dict[par_name]['type'] == 'Real':
# dim_values[par_ind] = float(self.hyper_param_dict[par_name]['default'])
# elif self.hyper_param_dict[par_name]['type'] == 'Categorical':
# dim_values[par_ind] = self.hyper_param_dict[par_name]['default']
# else:
# raise ValueError(
# 'Type %s is not a valid parameter type' % self.hyper_param_dict[par_name]['type'])
return
def start_new_run(self, dim_values):
it_expname = 'run_' + str(self.iter_ind)
it_expdir = os.path.join(self.expdir, it_expname)
it_exp_recipe_dir = os.path.join(self.exp_recipe_dir, it_expname)
if os.path.isdir(it_expdir):
print 'WARNING: %s is already a directory!' % it_expdir
shutil.rmtree(it_expdir)
os.makedirs(it_expdir)
if os.path.isdir(it_exp_recipe_dir):
print 'WARNING: %s is already a directory!' % it_exp_recipe_dir
shutil.rmtree(it_exp_recipe_dir)
shutil.copytree(self.recipe, it_exp_recipe_dir)
# adapt the config files according to dim_values
self.prepare_configs(config_dir=it_exp_recipe_dir, requested_values=dim_values)
print '*** STARTING NEW MODEL ***'
print 'Model %d will use values:' % self.iter_ind
print dim_values
# compare the new model with previous models
if self.all_losses:
all_distances = []
all_runs_inds = []
for ref_run_ind, ref_values in enumerate(self.all_dim_values):
if ref_run_ind not in self.unsuitable_runs:
dist = distance(self.space, dim_values, ref_values)
all_distances.append(dist)
all_runs_inds.append(ref_run_ind)
tmp = np.argsort(all_distances)
sorted_distances = [all_distances[tmpi] for tmpi in tmp]
sorted_runs_inds = [all_runs_inds[tmpi] for tmpi in tmp]
closest_run = sorted_runs_inds[0]
smallest_distance = sorted_distances[0]
sorted_runs_with_loss_inds = [ind for ind in sorted_runs_inds if ind in self.all_losses.keys()]
sorted_distances_with_loss = \
[sorted_distances[tmpi] for tmpi, ind in enumerate(sorted_runs_inds) if ind in self.all_losses.keys()]
closest_run_with_loss = sorted_runs_with_loss_inds[0]
smallest_distance_with_loss = sorted_distances_with_loss[0]
print \
'Closest previous model is run_%d (distance: %f, loss=%f) with values:' \
% (closest_run_with_loss, smallest_distance_with_loss, self.all_losses[closest_run_with_loss])
print self.all_dim_values[closest_run_with_loss]
if closest_run_with_loss != closest_run:
print \
'Closest previous model without evaluated loss is run_%d (distance: %f) with values:' \
% (closest_run, smallest_distance)
print self.all_dim_values[closest_run]
# train and validate a model with the above config files
job_string = 'run %s --expdir=%s --recipe=%s --computing=%s --sweep_flag=%s' % (
self.command, it_expdir, it_exp_recipe_dir, self.computing, True)
os.system(job_string)
file_to_watch = os.path.join(it_expdir, 'val_sum.json')
self.watch_list[self.iter_ind] = file_to_watch
self.all_dim_values.append(dim_values)
self.iter_ind += 1
self.n_job_running += 1
def check_watch_files(self):
found_losses = dict()
for ind, watch_file in self.watch_list.iteritems():
if os.path.isfile(watch_file):
with open(watch_file, 'r') as fid:
val_sum = json.load(fid)
if self.selected_segment_length not in val_sum:
raise ValueError(
'did not find segment length %s in "val_sum.json"' % self.selected_segment_length)
loss = val_sum[self.selected_segment_length]
# if self.selected_task not in loss:
# raise ValueError('did not find task %s in "val_sum.json"' % self.selected_task)
# loss = loss[self.selected_task]
found_losses[ind] = loss
n_found_losses = len(found_losses)
if n_found_losses > 0:
self.n_job_running -= n_found_losses
for ind in found_losses:
del self.watch_list[ind]
self.process_losses(found_losses)
self.checkpoint()
def process_losses(self, losses):
for run_ind, loss in losses.iteritems():
print 'Found loss %.3f for model %d' % (loss, run_ind)
if math.isnan(loss):
loss = self.artificial_high_loss
print 'Found loss = NaN. Changing to loss = %.3f' % self.artificial_high_loss
if loss > self.artificial_high_loss:
loss = self.artificial_high_loss
print 'Found high loss. Changing to loss = %.3f' % self.artificial_high_loss
self.all_losses[run_ind] = loss
losses[run_ind] = loss
dim_values_of_losses = [self.all_dim_values[ind] for ind in losses]
found_losses = [losses[ind] for ind in losses]
# pass the new information to the optimizer
self.last_result = self.tell(dim_values_of_losses, y=found_losses, fit=True)
# self.all_results.append(self.last_result)
def adapt_hyper_param(self, dim_values, verbose=True):
adapt_hyper_param_name = self.adapt_param['param_name']
min_adapt_param = int(self.hyper_param_dict[adapt_hyper_param_name]['min'])
max_adapt_param = int(self.hyper_param_dict[adapt_hyper_param_name]['max'])
par_cnt_scheme = self.adapt_param['par_cnt_scheme']
if 'parts_to_consider_for_cnt' in self.adapt_param:
parts_to_consider_for_cnt = self.adapt_param['parts_to_consider_for_cnt']
else:
parts_to_consider_for_cnt = ['total']
param_thr = self.adapt_param['param_thr']
vals_dict = {
name: val for (name, val) in zip(self.hyper_param_names, dim_values)}
# Exceptional case:
if 'cnn_num_enc_lay' in vals_dict and vals_dict['cnn_num_enc_lay'] == 0:
# if no CNN part, output of LSTM should not be altered
vals_dict['concat_flatten_last_2dims_cnn'] = 'True'
dim_values[-1] = 'True'
adapt_param_value = min_adapt_param
prev_par_cnt_dict = dict()
while True:
vals_dict[adapt_hyper_param_name] = adapt_param_value
values_suitable, par_cnt_dict = check_parameter_count(
vals_dict, param_thr, par_cnt_scheme, parts_to_consider_for_cnt, model_cfg=self.model_cfg)
if not values_suitable:
if par_cnt_dict is None:
best_adapt_param_value = adapt_param_value - 1
break
elif par_cnt_dict['to_consider'] > param_thr:
# went over allowed parameter count, best value for adaptation parameter is previous value
best_adapt_param_value = adapt_param_value - 1
break
if values_suitable and 'cnn_num_enc_lay' in vals_dict and vals_dict['cnn_num_enc_lay'] == 0:
# there is no cnn, so no point in tuning the number of cnn filters. Just stop the adaptation and set
# adaptation parameter to min_adapt_param
best_adapt_param_value = min_adapt_param
prev_par_cnt_dict = par_cnt_dict
break
if adapt_param_value > max_adapt_param:
# reached the maximum value for the adaptation parameter and still did not go over allowed_parameter_count*0.95
best_adapt_param_value = max_adapt_param + 1
break
adapt_param_value += 1
prev_par_cnt_dict = par_cnt_dict
actual_par_cnt_dict = prev_par_cnt_dict
if best_adapt_param_value < min_adapt_param or best_adapt_param_value > max_adapt_param or \
actual_par_cnt_dict['to_consider'] < param_thr*0.95:
fixed_values_suitable = False
else:
fixed_values_suitable = True
if verbose:
print_str = \
'Found suitable hyper parameter values, leading to %d trainable parameters, of which %d ' \
'were counted towards the requested number of trainable parameters (' % \
(actual_par_cnt_dict['total'], actual_par_cnt_dict['to_consider'])
for par_type, par_type_cnt in actual_par_cnt_dict.iteritems():
if par_type not in ['total', 'to_consider']:
print_str += '%s: %d; ' % (par_type, par_type_cnt)
print_str += ')'
print print_str
vals_dict[adapt_hyper_param_name] = best_adapt_param_value
dim_values = [vals_dict[name] for name in self.hyper_param_names]
return dim_values, fixed_values_suitable
def prepare_configs(self, config_dir, requested_values):
par_ind = 0
alternative_format_lines = []
for param_name in self.hyper_param_names:
param_info = self.hyper_param_dict[param_name]
param_info_keys = param_info.keys()
standard_format = 'config' in param_info_keys and 'field' in param_info_keys and 'name' in param_info_keys
alternative_format = any(['case' in key for key in param_info_keys])
if not standard_format and not alternative_format:
raise ValueError('No adjustments to the config files have been made for this parameter: %s' % param_name)
if standard_format:
config_file = os.path.join(config_dir, param_info['config'])
param_config_field = param_info['field']
param_config_name = param_info['name']
config_data = configparser.ConfigParser()
config_data.read(config_file)
# check if multiple fields have to be set
if ' ' in param_config_field:
param_config_fields = param_config_field.split(' ')
param_config_names = param_config_name.split(' ')
if len(param_config_fields) != len(param_config_names):
raise ValueError(
'A config name should be set for each config field. Got %d config fields and %d config names' %
(len(param_config_fields), len(param_config_names)))
for (param_config_field, param_config_name) in zip(param_config_fields, param_config_names):
config_data.set(param_config_field, param_config_name, str(requested_values[par_ind]))
else:
config_data.set(param_config_field, param_config_name, str(requested_values[par_ind]))
with open(config_file, 'w') as fid:
config_data.write(fid)
if alternative_format:
look_for_key = 'case_%s' % str(requested_values[par_ind]).lower()
if look_for_key in param_info_keys:
all_line_names = self.hyper_param_dict[param_name][look_for_key].split(' ')
for line_name in all_line_names:
alternative_format_lines.append(self.hyper_param_dict[param_name][line_name])
par_ind += 1
# Finally, apply the alternative format lines
for line in alternative_format_lines:
line_split = line.split(' ')
config_file = line_split[0]
config_file = os.path.join(config_dir, config_file)
param_config_field = line_split[1]
param_config_name = line_split[2]
param_values = ' '.join(line_split[3:])
config_data = configparser.ConfigParser()
config_data.read(config_file)
config_data.set(param_config_field, param_config_name, param_values)
with open(config_file, 'w') as fid:
config_data.write(fid)
def checkpoint(self, optimizer_name='optimizer'):
checkpoint_file = os.path.join(self.expdir, '%s.pkl' % optimizer_name)
with open(checkpoint_file, 'w') as fid:
pickle.dump(self, fid)
def tell(self, x, y, fit=True):
if fit:
self.models = []
# for ind in range(len(self.all_results)):
# if self.all_results[ind]:
# self.all_results[ind].models = []
result = super(HyperParamOptimizer, self).tell(x, y, fit=fit)
return result
def _tell(self, x, y, fit=True):
# Copied from skopt
"""Perform the actual work of incorporating one or more new points.
See `tell()` for the full description.
This method exists to give access to the internals of adding points
by side stepping all input validation and transformation."""
if "ps" in self.acq_func:
if is_2Dlistlike(x):
self.Xi.extend(x)
self.yi.extend(y)
self._n_initial_points -= len(y)
elif is_listlike(x):
self.Xi.append(x)
self.yi.append(y)
self._n_initial_points -= 1
# if y isn't a scalar it means we have been handed a batch of points
elif is_listlike(y) and is_2Dlistlike(x):
self.Xi.extend(x)
self.yi.extend(y)
self._n_initial_points -= len(y)
elif is_listlike(x):
self.Xi.append(x)
self.yi.append(y)
self._n_initial_points -= 1
else:
raise ValueError("Type of arguments `x` (%s) and `y` (%s) not compatible." % (type(x), type(y)))
# optimizer learned something new - discard cache
self.cache_ = {}
# after being "told" n_initial_points we switch from sampling
# random points to using a surrogate model
if fit and self._n_initial_points <= 0 and self.base_estimator_ is not None:
transformed_bounds = np.array(self.space.transformed_bounds)
est = clone(self.base_estimator_)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(self.space.transform(self.Xi), self.yi)
if hasattr(self, "next_xs_") and self.acq_func == "gp_hedge":
self.gains_ -= est.predict(np.vstack(self.next_xs_))
self.models.append(est)
# We're gonna lie to the estimator by telling it a loss for the points that are still being evaluated,
# similar to what we do when we ask for multiple points in ask().
points_running = self.watch_list.keys()
num_points_running = len(points_running)
points_to_lie_about = [self.all_dim_values[run_ind] for run_ind in points_running]
strategy = "cl_mean"
if strategy == "cl_min":
y_lie = np.min(self.yi) if self.yi else 0.0 # CL-min lie
elif strategy == "cl_mean":
y_lie = np.mean(self.yi) if self.yi else 0.0 # CL-mean lie
else:
y_lie = np.max(self.yi) if self.yi else 0.0 # CL-max lie
# Lie to the fake optimizer.
fake_est = copy.deepcopy(est)
X_to_tell = self.Xi + points_to_lie_about
X_to_tell = self.space.transform(X_to_tell)
y_to_tell = self.yi + list(np.ones(num_points_running) * y_lie)
fake_est.fit(X_to_tell, y_to_tell)
# even with BFGS as optimizer we want to sample a large number
# of points and then pick the best ones as starting points
# X = self.space.transform(self.space.rvs(
# n_samples=self.n_points, random_state=self.rng))
Xspace = self.space.rvs(n_samples=self.n_points, random_state=self.rng)
param_thr = self.adapt_param['param_thr']
par_cnt_scheme = self.adapt_param['par_cnt_scheme']
if 'parts_to_consider_for_cnt' in self.adapt_param:
parts_to_consider_for_cnt = self.adapt_param['parts_to_consider_for_cnt']
else:
parts_to_consider_for_cnt = ['total']
suitable_X, _ = check_parameter_count_for_sample(
Xspace, self.hyper_param_names, param_thr, par_cnt_scheme, parts_to_consider_for_cnt,
model_cfg=self.model_cfg)
# for x in Xspace:
# vals_suitable, _ = check_parameter_count_for_sample(
# x, self.hyper_param_names, param_thr, par_cnt_scheme)
# suitable_X.append(vals_suitable)
Xspace = [Xspace[ind] for ind, suit in enumerate(suitable_X) if suit]
X = self.space.transform(Xspace)
self.next_xs_ = []
for cand_acq_func in self.cand_acq_funcs_:
values = _gaussian_acquisition(
X=X, model=fake_est, y_opt=np.min(self.yi),
acq_func=cand_acq_func,
acq_func_kwargs=self.acq_func_kwargs)
# Find the minimum of the acquisition function by randomly
# sampling points from the space
if self.acq_optimizer == "sampling":
next_x = X[np.argmin(values)]
# Use BFGS to find the minimum of the acquisition function, the
# minimization starts from `n_restarts_optimizer` different
# points and the best minimum is used
elif self.acq_optimizer == "lbfgs":
x0 = X[np.argsort(values)[:self.n_restarts_optimizer]]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
results = Parallel(n_jobs=self.n_jobs)(
delayed(fmin_l_bfgs_b)(
gaussian_acquisition_1D, x,
args=(fake_est, np.min(self.yi), cand_acq_func, self.acq_func_kwargs),
bounds=self.space.transformed_bounds,
approx_grad=False,
maxiter=20)
for x in x0)
cand_xs = np.array([r[0] for r in results])
cand_acqs = np.array([r[1] for r in results])
next_x = cand_xs[np.argmin(cand_acqs)]
# lbfgs should handle this but just in case there are
# precision errors.
if not self.space.is_categorical:
next_x = np.clip(
next_x, transformed_bounds[:, 0],
transformed_bounds[:, 1])
self.next_xs_.append(next_x)
if self.acq_func == "gp_hedge":
logits = np.array(self.gains_)
logits -= np.max(logits)
exp_logits = np.exp(self.eta * logits)
probs = exp_logits / np.sum(exp_logits)
next_x = self.next_xs_[np.argmax(self.rng.multinomial(1, probs))]
else:
next_x = self.next_xs_[0]
# note the need for [0] at the end
self._next_x = self.space.inverse_transform(
next_x.reshape((1, -1)))[0]
# Pack results
return create_result(self.Xi, self.yi, self.space, self.rng, models=self.models)
def copy(self, random_state=None):
"""Create a shallow copy of an instance of the optimizer.
Parameters
----------
* `random_state` [int, RandomState instance, or None (default)]:
Set the random state of the copy.
"""
optimizer = HyperParamOptimizer(
hyper_param_conf=self.hyper_param_conf, command=self.command, expdir=self.expdir,
exp_recipe_dir=self.exp_recipe_dir, recipe=self.recipe, computing=self.computing,
exp_proposal_watch_dir=self.exp_proposal_watch_dir)
super(HyperParamOptimizer, optimizer).__init__(
dimensions=self.space.dimensions,
base_estimator=self.base_estimator_,
n_initial_points=self.n_initial_points_,
acq_func=self.acq_func,
acq_optimizer=self.acq_optimizer,
acq_func_kwargs=self.acq_func_kwargs,
acq_optimizer_kwargs=self.acq_optimizer_kwargs,
random_state=random_state)
optimizer.n_points = self.n_points
if hasattr(self, "gains_"):
optimizer.gains_ = np.copy(self.gains_)
if self.Xi:
optimizer._tell(self.Xi, self.yi)
return optimizer
def create_opt_only_val_loss(self):
new_opt = copy.deepcopy(self)
new_opt.Xi = []
new_opt.yi = []
new_opt.n_points = 4000
new_opt.process_losses(new_opt.all_losses)
new_opt.checkpoint('optimizer_only_valid_losses')
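# Usage sketch (hypothetical paths and config names; Python 2, like the rest of
# this module). The optimizer is typically driven by its __call__ loop:
#
#   opt = HyperParamOptimizer(
#       hyper_param_conf='hyper_param.cfg', command='train', expdir='exp/sweep',
#       exp_recipe_dir='exp/recipes', recipe='config/recipe', computing='condor')
#   opt()             # ask/tell loop until num_iters experiments have finished
#   opt.checkpoint()  # pickles the optimizer state to <expdir>/optimizer.pkl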
|
1649914
|
import basevcstest
class TestVCSLambert(basevcstest.VCSBaseTest):
def testLambert(self):
s = self.clt("clt")
iso = self.x.createisofill()
p = self.x.createprojection()
p.type = "lambert"
iso.projection = p
self.x.plot(s(latitude=(20, 60), longitude=(-140, -20)),
iso, bg=self.bg)
self.checkImage("test_vcs_lambert.png")
|
1649940
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def assignHost(VCs_query,level):
# Host range at the species level
gVC_tax={}
for vc in VCs_query:
gVC_tax[vc]=[]
for i in VCs_query:
idx=int(i.split('_')[1])
for uv in VCs[idx]:
assem=all_uvs_assem.get(uv)
if assem!=None:
for a in assem:
if level=='species':
gVC_tax[i].append(assem_to_spp[a])
if level=='genus':
gVC_tax[i].append(assem_to_genus[a])
if level=='family':
gVC_tax[i].append(assem_to_fam[a])
if level=='order':
gVC_tax[i].append(assem_to_order[a])
if level=='class':
gVC_tax[i].append(assem_to_class[a])
if level=='phylum':
gVC_tax[i].append(assem_to_phyla[a])
for k in gVC_tax.keys():
gVC_tax[k]=list(set(gVC_tax[k]))
return gVC_tax
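# Usage sketch (hypothetical VC names; relies on the lookup tables built below):
#   host_map = assignHost(['VC_0', 'VC_12'], 'genus')
#   host_map['VC_0']  # -> deduplicated list of host genera for that cluster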
scaff_to_gca={}
with open('gca_to_scaf.txt') as inFile:
for line in inFile:
scaff_to_gca[line.split()[1].strip()]=line.split()[0]
gca_to_scaff={}
for k in list(scaff_to_gca.keys()):
gca_to_scaff[scaff_to_gca[k]]=k
assem_to_fam={}
assem_to_order={}
assem_to_class={}
assem_to_phyla={}
assem_to_genus={}
assem_to_spp={}
fam_to_phyla={}
order_to_phyla={}
class_to_phyla={}
genus_to_phyla={}
genus_to_fam={}
genus_to_order={}
with open('hgg_bgi_taxonomy.tab') as inFile:
for line in inFile:
assem=line.split('\t')[0]
if len(assem.split('_'))==3:
assem=assem.split('_')[0]+'_'+assem.split('_')[1]+'#'+assem.split('_')[2]
elif 'scaffold' in assem:
assem=scaff_to_gca[assem]
fam=line.split('\t')[5]
phyla=line.split('\t')[2]
order=line.split('\t')[4]
genus=line.split('\t')[-2]
classB=line.split('\t')[3]
spp=line.split('\t')[-1].strip()
if 'Firmicutes' in phyla:
phyla='Firmicutes'
assem_to_fam[assem]=fam
assem_to_order[assem]=order
assem_to_class[assem]=classB
assem_to_phyla[assem]=phyla
assem_to_genus[assem]=genus
assem_to_spp[assem]=spp
fam_to_phyla[fam]=phyla
order_to_phyla[order]=phyla
class_to_phyla[classB]=phyla
genus_to_phyla[genus]=phyla
genus_to_fam[genus]=fam
genus_to_order[genus]=order
all_uvs_assem={} # uv -> assemblies (non-redundant)
with open('WG_crispr_targets.txt') as inFile:
for line in inFile:
try:
all_uvs_assem[line.split()[0]].append(line.strip().split()[1])
except KeyError:
all_uvs_assem[line.split()[0]]=[line.strip().split()[1]]
VCs=[]
with open('GPD_VCs.txt') as inFile:
for line in inFile:
toks=line.strip().split('\t')
if len(toks)>1: # No singletons
VCs.append(toks)
X_hq_deep={}
with open('bwa_processed_75_sampleNames.txt') as inFile:
for line in inFile:
toks=line.strip().split(',')
X_hq_deep[toks[0]]=toks[1:]
VC_toGenus={}
for idx in range(len(VCs)):
VC_toGenus[idx]=[]
for idx in range(len(VCs)):
for uv in VCs[idx]:
assem=all_uvs_assem.get(uv)
if assem!=None:
for a in assem:
VC_toGenus[idx].append(assem_to_genus[a])
if len(VC_toGenus[idx])!=0:
VC_toGenus[idx]=list(set(VC_toGenus[idx]))[0]
# I'm mapping uvigs to their VCs
uvs_to_VC={}
for vc_idx in range(len(VCs)):
for uv in VCs[vc_idx]:
uvs_to_VC[uv]=vc_idx
# Fetching metadata
n=1
run_toCountry={}
run_toContinent={}
run_toStatus={}
run_toDepth={}
run_toDisease={}
run_toPub={}
run_toAge={}
run_toStudy={}
run_toLife={}
with open('Gut-metagenomes_29052019.csv') as inFile:
for line in inFile:
if n==1:
n+=1
else:
if line.split(',')[2]=='Yes':
my_run=line.split(',')[0]
run_toCountry[my_run]=line.split(',')[13]
run_toContinent[my_run]=line.split(',')[14]
run_toStatus[my_run]=line.split(',')[5]
run_toDepth[my_run]=float(line.split(',')[1])
run_toDisease[my_run]=line.split(',')[6]
run_toPub[my_run]=line.strip().split(',')[-1]
run_toLife[my_run]=line.strip().split(',')[12]
age=line.split(',')[9]
if age!='NA':
run_toAge[my_run]=float(age)
else:
run_toAge[my_run]='NA'
run_toStudy[my_run]=line.split(',')[4]
all_samples=run_toStatus.keys()
samples_ds=[]
for i in all_samples:
if run_toDepth[i]>=0.5e8:
samples_ds.append(i)
samples_ds_uvs={}
for s in samples_ds:
samples_ds_uvs[s]=[]
for k in list(X_hq_deep.keys()):
for s in X_hq_deep[k]:
samples_ds_uvs[s].append(k)
# S4A
n_conts=[1,2,3,4,5,6]
VCs_conts=[]
VCs_set_glob=[] # Set of VCs found in 1,2...
for my_n in n_conts:
VCs_glob=[]
n=0
with open('VC_continent_span.txt') as inFile:
for line in inFile:
if n==0:
n+=1
else:
z=0
vc=line.split('\t')[0]
toks=line.strip().split('\t')[1:]
for t in toks:
if int(t)>0:
z+=1
if z==my_n: # Change this to control at least or exact
VCs_glob.append(vc)
VCs_conts.append(len(VCs_glob))
VCs_set_glob.append(VCs_glob)
df_contDist=pd.DataFrame()
df_contDist['Continents']=n_conts
df_contDist['Number of VCs']=VCs_conts
df_contDist=df_contDist.sort_values(by='Number of VCs')
hits=[]
for my_t in ['genus']:
gVC_genus=assignHost(VCs_set_glob[4]+VCs_set_glob[5],my_t)
n=0
z=0
for vc in gVC_genus.keys():
if len(gVC_genus[vc])!=0:
n+=1
if len(gVC_genus[vc])==1:
z+=1
hits.append(z/n)
n=0
z=0
for g in gVC_genus:
if len(gVC_genus[g])!=0:
n+=1
if len(gVC_genus[g])>1: # Is host range able to cross genera?
z+=1
print('Global')
print('Total assigned: '+str(n)+' Host range > 1 genus: '+str(z))
hits=[]
for my_t in ['genus']:
gVC_genus=assignHost(VCs_set_glob[0],my_t)
n=0
z=0
for vc in gVC_genus.keys():
if len(gVC_genus[vc])!=0:
n+=1
if len(gVC_genus[vc])==1:
z+=1
hits.append(z/n)
n=0
z=0
for g in gVC_genus:
if len(gVC_genus[g])!=0:
n+=1
if len(gVC_genus[g])>1: # Is host range able to cross genera?
z+=1
print('Single continent')
print('Total assigned: '+str(n)+' Host range > 1 genus: '+str(z))
sns.set(font_scale=1.2)
sns.set_style("whitegrid")
plt.figure (figsize=(5,5))
plt.ylim(0,0.3)
df_hr=pd.DataFrame()
df_hr['Distribution']=['Single continent VCs','Global VCs']
df_hr['Fraction of VCs with broad host range']=[347/3116.0,36/139.0]
sns.barplot(x='Distribution',y='Fraction of VCs with broad host range',data=df_hr)
plt.show()
# Figure_S4B
# Supplementary figure B
# On average, how many connections each global VC from each order has?
ords=['Bacteroidales','Lachnospirales','Oscillospirales']
order_gs=[]
av_genera=[]
for order_t in ords:
av_genera.append([])
vc_osc={}
with open('Global_dist_hostNet.txt') as inFile:
for line in inFile:
vc=line.split(',')[0]
g=line.strip().split(',')[1]
if genus_to_order[g]==order_t:
try:
vc_osc[vc].append(g)
except KeyError:
vc_osc[vc]=[g]
for k in vc_osc:
vc_osc[k]=list(set(vc_osc[k]))
av=0
n=0
avs_bact=[]
for k in vc_osc:
av+=len(vc_osc[k])
avs_bact.append(len(vc_osc[k]))
av_genera[-1].append(len(vc_osc[k]))
n+=1
order_gs.append(av/n)
df_orders_gs=pd.DataFrame()
df_orders_gs['Order']=ords
df_orders_gs['Genera per VC']=order_gs
df_orders_gs=df_orders_gs.sort_values(by='Genera per VC',ascending=False)
sns.set(font_scale=1.2)
plt.figure (figsize=(5,5))
sns.set_style("whitegrid")
#plt.ylim(0,4.0)
sns.barplot(x='Order',y='Genera per VC',data=df_orders_gs)
plt.show()
|
1649953
|
import six
def brackets_check(pattern):
"""
Check whether the pattern is missing square brackets, in a way which does
not require the usual parsing. This is a light hack to provide an improved
error message in this particular case.
:param pattern: A STIX pattern string
:return: True if the pattern had its brackets; False if not
"""
if isinstance(pattern, six.string_types):
# There can be an arbitrary number of open parens first... skip over
# those
for c in pattern:
if c != "(" and not c.isspace():
break
if c == "[":
result = True
else:
result = False
else:
result = False
return result
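# Usage sketch: typical accept/reject cases for the check above.
if __name__ == '__main__':
    print(brackets_check("[ipv4-addr:value = '198.51.100.1']"))   # True
    print(brackets_check("(( [file:name = 'a.exe'] ))"))          # True: leading parens are skipped
    print(brackets_check("ipv4-addr:value = '198.51.100.1'"))     # False: brackets missing
    print(brackets_check(42))                                     # False: not a string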
|
1650002
|
class DBParser():
def __init__(self):
print("----------Start DBParser ----------")
self.query = None
def split_query(self, query):
return query.strip().replace("\n", " ").split()
def get_index(self, statement):
return self.query.index(statement) if statement in self.query else None
def parse(self, query):
self.query = self.split_query(query)
print(self.query)
# Dealing INSERT Query
if self.query[0] == "INSERT":
in_local_path = self.query[self.get_index("INSERT")+1]
out_hdfs_path = self.query[self.get_index("INTO")+1]
num_partitions = self.query[self.get_index("PARTITIONS")+1]
return in_local_path, out_hdfs_path, num_partitions
#self.db_manager.insert(in_local_path, out_hdfs_path, num_partitions)
# Dealing SELECT Query
if self.query[0] == "SELECT":
sql_query = " ".join(self.query[:self.get_index("FOR")])
sql_query = " ".join(sql_query.split()[:3] + ['temp'] + sql_query.split()[4:])
hdfs_path = self.query[self.get_index("FROM")+1]
task, task_path = self.query[self.get_index("FOR")+1:]
return sql_query, hdfs_path, task, task_path
# self.db_manager.select(sql_query, hdfs_path, task, task_path)
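# Usage sketch (hypothetical queries matching the two shapes handled above):
if __name__ == '__main__':
    parser = DBParser()
    # INSERT <local path> INTO <hdfs path> PARTITIONS <n>
    print(parser.parse("INSERT /data/in.csv INTO /user/db/out PARTITIONS 4"))
    # SELECT ... FROM <hdfs path> ... FOR <task> <task path>
    print(parser.parse("SELECT * FROM /user/db/out WHERE id > 10 FOR wordcount /tmp/out"))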
|
1650018
|
class StrictVersion:
def __init__(self, version):
self.version = version
__all__ = ['VERSION']
VERSION = StrictVersion('0.32')
|
1650029
|
import unittest
import groundstation.objects.object_factory as object_factory
from groundstation.objects.base_object_pb2 import BaseObject, \
ROOT as TYPE_ROOT, \
UPDATE as TYPE_UPDATE, \
UNSET as TYPE_UNSET
from groundstation.objects.root_object_pb2 import RootObject
from groundstation.objects.update_object_pb2 import UpdateObject
def new_root_object(weak):
root = RootObject()
root.id = "butts"
root.channel = "butts"
root.protocol = "butts"
if not weak:
root.type = TYPE_ROOT
return root
def new_update_object(weak, parents=[]):
update = UpdateObject()
update.parents.extend(parents)
update.data = "butts"
if not weak:
update.type = TYPE_UPDATE
return update
class TypeOfTestCase(unittest.TestCase):
def test_strongly_typed_root(self):
root_str = new_root_object(False).SerializeToString()
self.assertEqual(object_factory.type_of(root_str), TYPE_ROOT)
def test_weakly_typed_root(self):
root_str = new_root_object(True).SerializeToString()
self.assertEqual(object_factory.type_of(root_str), TYPE_UNSET)
def test_strongly_typed_update(self):
update_str = new_update_object(False).SerializeToString()
self.assertEqual(object_factory.type_of(update_str), TYPE_UPDATE)
def test_weakly_typed_update(self):
update_str = new_update_object(True).SerializeToString()
self.assertEqual(object_factory.type_of(update_str), TYPE_UNSET)
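if __name__ == '__main__':
    # Standard unittest entry point so the module can be run directly
    unittest.main()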
|
1650043
|
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
from tornado.web import StaticFileHandler
from .dictionaries import discover_dictionaries, dictionaries_to_url
from ._version import __version__
class LanguageManagerHandler(APIHandler):
lang_dictionaries = []
# The following decorator should be present on all verb methods (head, get, post,
# patch, put, delete, options) to ensure only authorized user can request the
# Jupyter server
@tornado.web.authenticated
def get(self):
self.finish({
'version': __version__,
'dictionaries': self.lang_dictionaries
})
def setup_handlers(web_app, url_path, server_app):
dictionaries = discover_dictionaries(server_app)
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
# Prepend the base_url so that it works in a JupyterHub setting
handlers = []
for lang in dictionaries:
lang_url = url_path_join(base_url, url_path, lang['id'])
handlers.append(
(
r"{}/(.*\.(?:aff|dic))".format(lang_url),
StaticFileHandler,
{"path": lang['path']}
)
)
web_app.add_handlers(host_pattern, handlers)
LanguageManagerHandler.lang_dictionaries = dictionaries_to_url(dictionaries, url_path_join(base_url, url_path))
# Prepend the base_url so that it works in a JupyterHub setting
route_pattern = url_path_join(base_url, url_path, "language_manager")
handlers = [(route_pattern, LanguageManagerHandler)]
web_app.add_handlers(host_pattern, handlers)
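# Usage sketch (hypothetical extension name; normally invoked from the server
# extension's load hook):
#   setup_handlers(server_app.web_app, "my-spellchecker-extension", server_app)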
|