index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
14,300 | 9f890442735b41559fb7d60792070073857eda23 | # open("aniket.txt")
# Read "aniket.txt" and print all of its lines at once.  A context
# manager guarantees the file is closed even if an exception is
# raised while reading (the original relied on a manual close()).
with open("aniket.txt") as f:
    print(f.readlines())
    # Alternative ways to consume the file:
    # print(f.readline())        # one line at a time
    # content = f.read()         # whole file as one string
    # for line in f:             # stream line by line
    #     print(line, end="")
f.close() # you should always close file |
14,301 | 3d47ce1678ddca391bd2d771714154a9ac486888 |
from scrapy.mail import MailSender
mail = MailSender(
) |
14,302 | 01e0718b5e991c4e62d47ec058260b7ced37f830 | """ 23. Write a Python program to check a list is empty or not."""
print("Question 23")

l1 = []
l2 = ['ram', 'sita', 'hari', 'ram', 'sita', 'gita']


def check_list(l):
    """Return "LIST EMPTY" if *l* is empty, otherwise "LIST NOT EMPTY".

    Bug fix: the original printed the message inside the function and
    returned None, so ``print(check_list(l1))`` also emitted a spurious
    "None" line.  Returning the message yields a single correct print.
    """
    # An empty sequence is falsy.
    if not l:
        return "LIST EMPTY"
    return "LIST NOT EMPTY"


print(check_list(l1))
|
14,303 | a977b875647aed4d2105bae159f96b73b1ac11d5 | import numpy as np
import sys
import os
import sys
sys.path.append('src')
from scipy.constants import c, pi
from joblib import Parallel, delayed
from mpi4py.futures import MPIPoolExecutor
from mpi4py import MPI
from scipy.fftpack import fftshift, fft
import os
import time as timeit
os.system('export FONTCONFIG_PATH=/etc/fonts')
from functions import *
from time import time, sleep
import pickle
@profile
def oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index,
             master_index, P0_p1, P0_s, f_p, f_s, p_pos, s_pos, splicers_vec,
             WDM_vec, Dop, dAdzmm, D_pic, pulse_pos_dict_or, plots, ex,
             pm_fopa, pm_WDM1, fopa):
    """Run the FOPO cavity simulation.

    Seeds the pump and signal on the simulation grid, then loops the field
    through phase modulation, fibre propagation, WDMs and splicers for up
    to ``max_rounds`` round trips, exporting diagnostics each round.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; statement nesting (notably where the while-loop body ends and
    whether ``consolidate`` sits inside it) should be confirmed against
    the original repository.
    """
    mode_names = ['LP01a']
    u = np.zeros(sim_wind.t.shape, dtype='complex128')
    U = np.zeros(sim_wind.fv.shape, dtype='complex128')
    # FWHM -> 1/e width conversion (Gaussian relation); currently unused
    # downstream but kept for parity with the original.
    T0_p = TFWHM_p / 2 / (np.log(2))**0.5
    T0_s = TFWHM_s / 2 / (np.log(2))**0.5
    noise_new = noise_obj.noise_func(int_fwm)
    u = noise_new
    # CW pump injected at its grid position with offset frequency woff1.
    woff1 = (p_pos[1] + (int_fwm.nt) // 2) * 2 * pi * sim_wind.df[p_pos[0]]
    u[p_pos[0], :] += (P0_p1)**0.5 * np.exp(1j * (woff1) * sim_wind.t[p_pos[0]])
    # CW seed/signal injected at its own grid position.
    woff2 = -(s_pos[1] - (int_fwm.nt - 1) // 2) * 2 * pi * sim_wind.df[s_pos[0]]
    u[s_pos[0], :] += (P0_s)**0.5 * np.exp(-1j * (woff2) * sim_wind.t[s_pos[0]])
    U = fftshift(fft(u), axes=-1)
    master_index = str(master_index)
    max_rounds = arguments_determine(-1)
    if fopa:
        # A fibre amplifier is a single pass: no oscillation rounds.
        print('Fibre amplifier!')
        max_rounds = 0
    ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
                P0_s, f_p, f_s, max_rounds, mode_names, master_index,
                '00', 'original pump', D_pic[0], plots)
    U_original_pump = np.copy(U)
    # Pass the original pump through WDM1; port1 enters the loop.
    noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
    u, U = WDM_vec[0].pass_through((U, noise_new))[0]
    ro = -1
    t_total = 0
    # Unpack the integrand factors once and make them C-contiguous for
    # the propagation kernel.
    factors_xpm, factors_fwm, gama, tsh, w_tiled = \
        dAdzmm.factors_xpm, dAdzmm.factors_fwm, dAdzmm.gama, \
        dAdzmm.tsh, dAdzmm.w_tiled
    dz, dzstep, maxerr = int_fwm.dz, int_fwm.dzstep, int_fwm.maxerr
    Dop = np.ascontiguousarray(Dop / 2)
    factors_xpm = np.ascontiguousarray(factors_xpm)
    factors_fwm = np.ascontiguousarray(factors_fwm)
    gama = np.ascontiguousarray(gama)
    tsh = np.ascontiguousarray(tsh)
    w_tiled = np.ascontiguousarray(w_tiled)
    while ro < max_rounds:
        ro += 1
        print('round', ro)
        pulse_pos_dict = [
            'round ' + str(ro) + ', ' + i for i in pulse_pos_dict_or]
        ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
                    P0_s, f_p, f_s, ro, mode_names, master_index,
                    str(ro) + '1', pulse_pos_dict[3], D_pic[5], plots)
        # Phase modulate before the fibre.
        U = pm_fopa.modulate(U)
        u = ifft(ifftshift(U, axes=-1))
        # Pulse propagation through the fibre.
        U, dz = pulse_propagation(u, dz, dzstep, maxerr, Dop, factors_xpm,
                                  factors_fwm, gama, tsh, w_tiled)
        ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
                    P0_s, f_p, f_s, ro, mode_names, master_index,
                    str(ro) + '2', pulse_pos_dict[0], D_pic[2], plots)
        max_noise = 10 * noise_new.max()
        # Abort the run if energy reaches the grid edges (the FFTs would
        # wrap it around, i.e. a boundary-condition problem).
        if (U[:, 0] > max_noise).any() or (U[:, -1] > max_noise).any():
            with open("error_log", "a") as myfile:
                myfile.write("Pump: %5f, Seed: %5f, lamp: %5f, lams: %5f \n" % (
                    P0_p1, P0_s, 1e-3 * c / f_p, 1e-3 * c / f_s))
            break
        # WDM2: port 2 continues round the loop, port 1 exits the cavity.
        noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
        (out1, out2), (u, U) = WDM_vec[1].pass_through((U, noise_new))
        ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
                    P0_s, f_p, f_s, ro, mode_names, master_index,
                    str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
        # Splice after WDM2 for the signal.
        noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
        (u, U) = splicers_vec[2].pass_through((U, noise_new))[0]
        # Phase modulate the oscillating signal into phase with the
        # incoming pump.
        U = pm_WDM1.modulate(U_original_pump, U)
        # Back through WDM1 together with the fresh pump.
        (u, U) = WDM_vec[0].pass_through((U_original_pump, U))[0]
        # --------------------------- outbound ---------------------------
        ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
                    P0_s, f_p, f_s, ro, mode_names, master_index,
                    str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
    consolidate(ro, int_fwm, master_index, index)
    return ro
def calc_P_out(U, U_original_pump, fv, t):
    """Integrate the spectral power around the strongest peak above the
    pump and return the corresponding average output power.

    The pump peak is located in the original-pump spectrum first; the
    search for the output peak then starts 10 bins above it so the pump
    itself is excluded from the integration window.
    """
    U = np.abs(U)**2
    U_original_pump = np.abs(U_original_pump)**2
    freq_band = 2
    # Index of the pump peak in the original-pump spectrum.
    fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
    plom = fp_id + 10
    # Strongest spectral component above the pump region.
    fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
    fv_id += plom - 1
    # Integration window of +/- freq_band around that peak; map the band
    # edges back to the nearest grid indices.
    start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
    i = np.where(np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
    j = np.where(np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
    E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
    P_out = E_out / (2 * np.abs(np.min(t)))
    return P_out
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s,
              spl_losses, betas, lamda_c, WDMS_pars, lamp, lams, num_cores,
              maxerr, ss, plots, N, nplot, master_index, filesaves, Df_band,
              fr, fopa):
    """Build the simulation objects for one parameter set and run it.

    Constructs the grid/window, loss, dispersion, Raman, noise, WDM,
    splicer and integrator objects, then hands everything to ``oscilate``.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; confirm nesting against the original repository.
    """
    "------------------propagation paramaters------------------"
    dzstep = z / nplot  # distance per step
    dz_less = 1e2
    int_fwm = sim_parameters(n2, 1, alphadB)
    int_fwm.general_options(maxerr, ss)
    int_fwm.propagation_parameters(N, z, nplot, dz_less)
    lamda = lamp * 1e-9  # central wavelength of the grid [m]
    "---------------------Aeff-Qmatrixes-----------------------"
    M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
    "---------------------Grid&window-----------------------"
    P_p_bef, P_s_bef = pre_fibre_init_power(
        WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
    fv, where, f_centrals = fv_creator(
        lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef, P_s_bef, Df_band)
    print(fv[0][1] - fv[0][0])
    p_pos, s_pos, i_pos = where
    sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
    "---------------------Loss-in-fibres-----------------------"
    slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
    loss = Loss(int_fwm, sim_wind, amax=0)
    int_fwm.alpha = loss.atten_func_full(fv)
    # Nonlinear parameter per central frequency band.
    int_fwm.gama = np.array(
        [-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
    # Only the central bands carry nonlinearity; the edge bands are zeroed.
    int_fwm.gama[0:2] = 0
    int_fwm.gama[5:] = 0
    "--------------------Dispersion----------------------------"
    Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
    "---------------------Raman Factors------------------------"
    ram = Raman_factors(fr)
    ram.set_raman_band(sim_wind)
    "--------------------Noise---------------------------------"
    noise_obj = Noise(int_fwm, sim_wind)
    pulse_pos_dict_or = ('after propagation', "pass WDM2",
                         "pass WDM1 on port2 (remove pump)",
                         'add more pump', 'out')
    keys = ['loading_data/green_dot_fopo/pngs/' +
            str(i) + str('.png') for i in range(7)]
    D_pic = [plt.imread(i) for i in keys]
    "----------------Construct the integrator----------------"
    non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
                              sim_wind.w_tiled, ss, ram, cython_tick=True,
                              timer=False)
    "----------------------Formulate WDMS--------------------"
    if WDMS_pars == 'signal_locked':
        # Lock the WDM wavelengths via pump/signal/idler energy
        # conservation.
        Omega = 2 * pi * c / (lamp * 1e-9) - 2 * pi * c / (lams * 1e-9)
        omegai = 2 * pi * c / (lamp * 1e-9) + Omega
        lami = 1e9 * 2 * pi * c / (omegai)
        WDMS_pars = ([lamp, lams],  # WDM up downs in wavelengths [m]
                     [lami, lams],
                     [lami, lamp],
                     [lami, lams])
    WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c, fopa)
               for i in WDMS_pars]  # WDM up downs in wavelengths [m]
    # Phase-modulator constructors.
    pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
    pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
    "----------------------Formulate splicers--------------------"
    splicers_vec = [Splicer(loss=i) for i in spl_losses]
    f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], \
        sim_wind.fv[where[1][0], where[1][1]]
    # Construct the exporter used for plots / file dumps.
    ex = Plotter_saver(plots, filesaves, sim_wind.fv, sim_wind.t)
    ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index,
                  master_index, P_p, P_s, f_p, f_s, p_pos, s_pos,
                  splicers_vec, WDM_vec, Dop, non_integrand, D_pic,
                  pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1, fopa)
    return None
def main():
    """Set up the parameter sweep and dispatch ``formulate`` runs either
    serially, under the profiler, via joblib, or via MPI.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; confirm loop nesting against the original repository.
    """
    "-----------------------------Stable parameters----------------------------"
    num_cores = arguments_determine(1)  # computing cores for the sweep
    maxerr = 1e-13                      # max tolerable error per step
    ss = 1                              # include the self-steepening term
    Df_band_vec = [5, 5, 10, 20]
    fr = 0.18
    plots = False      # produce plots (slow!)
    filesaves = True   # dump data to file
    complete = False
    nplot = 1          # number of plots within fibre, min is 2
    if arguments_determine(-1) == 0:
        # No oscillations requested: the WDMs are deleted so the system
        # becomes a FOPA.
        fopa = True
    else:
        fopa = False
    if 'mpi' in sys.argv:
        method = 'mpi'
    elif 'joblib' in sys.argv:
        method = 'joblib'
    else:
        method = 'single'
    "--------------------------------------------------------------------------"
    stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss,
                  'plots': plots, 'nplot': nplot, 'filesaves': filesaves,
                  'fr': fr, 'fopa': fopa}
    "------------------------Can be variable parameters------------------------"
    n2 = 2.5e-20   # nonlinear index [m/W]
    gama = 10e-3   # overwrites n2 and Aeff [W/m]
    alphadB = 0    # loss within fibre [dB/m]
    z = 18         # length of the fibre
    wave_idx = 0
    power_area_idx = 0
    N = np.array([i for i in range(2, 13)])  # 2**N grid points
    # Power list, indexed as [wavelength][power_area].
    P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
                my_arange(4.6, 8.1, 0.1), my_arange(8.2, 12, 0.1)],
               [my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
                my_arange(4.6, 8.1, 0.1), my_arange(8.2, 12, 0.1)],
               [my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
                my_arange(4.6, 8.1, 0.1), my_arange(8.2, 12, 0.1)],
               [my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
                my_arange(4.6, 8.1, 0.1), my_arange(8.2, 12, 0.1)],
               [my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
                my_arange(5.1, 8.1, 0.1), my_arange(8.2, 12, 0.1)]]
    Df_band = Df_band_vec[power_area_idx]
    P_p = P_p_vec[wave_idx][power_area_idx]
    P_p = [6]  # overrides the sweep above with a single power
    P_s = 0
    TFWHM_p = 0  # full width half max of pump
    TFWHM_s = 0  # full width half max of signal
    spl_losses = [0, 0, 1.4]  # loss of each type of splice [dB]
    betas = np.array([0, 0, 0, 6.756e-2,  # propagation constants [ps^n/m]
                      -1.002e-4, 3.671e-7]) * 1e-3
    lamda_c = 1051.85e-9  # zero-dispersion wavelength
    WDMS_pars = ([1048., 1204.16],
                 [927.7, 1204.16])  # WDM up downs in wavelengths [m]
    lamp_vec = [1046, 1047, 1048, 1049, 1050]
    lamp = [lamp_vec[wave_idx]]
    lams = ['lock' for i in range(len(lamp))]
    lamp = lamp_vec[wave_idx]
    lams = 'lock'
    # NOTE(review): lams is the *string* 'lock' here, so lams[kk] in the
    # sweep loop below yields single characters ('l', 'o', ...) -- this
    # looks unintended; confirm against the original repository.
    var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z,
               'P_p': P_p, 'P_s': P_s, 'TFWHM_p': TFWHM_p,
               'TFWHM_s': TFWHM_s, 'spl_losses': spl_losses,
               'betas': betas, 'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
               'lamp': lamp, 'lams': lams, 'N': N, 'Df_band': Df_band}
    "--------------------------------------------------------------------------"
    outside_var_key = 'P_p'
    inside_var_key = 'N'
    inside_var = var_dic[inside_var_key]
    outside_var = var_dic[outside_var_key]
    del var_dic[outside_var_key]
    del var_dic[inside_var_key]
    "----------------------------Simulation------------------------------------"
    D_ins = [{'index': i, inside_var_key: insvar}
             for i, insvar in enumerate(inside_var)]
    large_dic = {**stable_dic, **var_dic}
    if len(inside_var) < num_cores:
        num_cores = len(inside_var)
    profiler_bool = arguments_determine(0)
    for kk, variable in enumerate(outside_var):
        create_file_structure(kk)
        _temps = create_destroy(inside_var, str(kk))
        _temps.prepare_folder()
        large_dic['lams'] = lams[kk]
        large_dic['master_index'] = kk
        large_dic[outside_var_key] = variable
        if profiler_bool:
            for i in range(len(D_ins)):
                formulate(**{**D_ins[i], **large_dic})
        elif method == 'mpi':
            iterables = ({**D_ins[i], **large_dic}
                         for i in range(len(D_ins)))
            with MPIPoolExecutor() as executor:
                A = executor.map(formulate, iterables)
        else:
            A = Parallel(n_jobs=num_cores)(
                delayed(formulate)(**{**D_ins[i], **large_dic})
                for i in range(len(D_ins)))
        _temps.cleanup_folder()
    print('\a')
    return None
class Band_predict(object):
    """Linearly extrapolate the frequency band needed for the next run
    from the two most recent (rounds, band) pairs."""

    def __init__(self, Df_band, nt):
        # History of bands tried and of rounds achieved for each.
        self.bands = []
        self.df = Df_band / nt
        self.ro = []

    def calculate(self, A, Df_band, over_band):
        """Record the pair (A, Df_band) and return the next band estimate.

        Bug fix: the original tested ``len(bands)`` -- an undefined name,
        raising NameError on every call -- instead of ``len(self.bands)``.
        """
        self.bands.append(Df_band)
        self.ro.append(A)
        if len(self.bands) == 1:
            # Not enough history to extrapolate yet: just widen by one.
            return Df_band + 1
        # Linear fit band = a * rounds + b through the last two points.
        a = (self.bands[-1] - self.bands[-2]) / (self.ro[-1] - self.ro[-2])
        b = self.bands[-1] - a * self.ro[-1]
        for i in over_band:
            try:
                Df_band[i] = a * arguments_determine(-1) + b
            except TypeError:
                # Df_band is not indexable-assignable for this entry.
                Df_band[i] = None
        return Df_band
if __name__ == '__main__':
    # Time the whole sweep and report the duration in several units.
    start = time()
    main()
    dt = time() - start
    print(dt, 'sec', dt / 60, 'min', dt / 60 / 60, 'hours')
|
14,304 | bf0ca858106411ec25275f6b53817521cf100b7b | import copy
import autoarray as aa
from typing import Tuple
class SettingsImagingCI(aa.SettingsImaging):
    """Settings controlling how charge-injection imaging data is extracted
    for CTI model fits."""

    def __init__(
        self,
        parallel_pixels: Tuple[int, int] = None,
        serial_pixels: Tuple[int, int] = None,
    ):
        super().__init__()
        # Pixel ranges extracted for parallel / serial CTI fitting.
        self.parallel_pixels = parallel_pixels
        self.serial_pixels = serial_pixels

    def modify_via_fit_type(self, is_parallel_fit, is_serial_fit):
        """Return a copy of these settings adjusted for the fit type.

        A parallel-only fit clears ``serial_pixels``; a serial-only fit
        clears ``parallel_pixels``; a combined parallel+serial fit clears
        both.  All other settings are untouched.  This reflects the
        appropriate way to extract the charge injection imaging data for
        fits using a parallel-only, serial-only, or joint CTI model.

        Parameters
        ----------
        is_parallel_fit
            If True, the fitted CTI model includes a parallel component.
        is_serial_fit
            If True, the fitted CTI model includes a serial component.
        """
        settings = copy.copy(self)
        if is_parallel_fit:
            settings.serial_pixels = None
        if is_serial_fit:
            settings.parallel_pixels = None
        return settings
|
class ExcelReadError(Exception):
    """Raised when an Excel file cannot be read.

    Bug fix: inherit from Exception rather than BaseException, so that
    ordinary ``except Exception`` handlers can catch it; BaseException is
    reserved for interpreter exits such as KeyboardInterrupt/SystemExit.
    """
    pass
|
14,306 | 4a095ee119e28347f1bf95207a0346db95545a7a | import os
import copy
import time
import numpy
from PyRED.files import read_behaviour
from PyRED.tasks.generic import TaskLoader
class QuestionnaireLoader(TaskLoader):
    """Class to process files from the RED questionnaires."""

    def __init__(self, data_dir, output_path=None, task_name="Q1_Questions"):
        """Initialises a new QuestionnaireLoader instance to read and
        process data from files generated by the RED questionnaires.

        Arguments
        data_dir        - String. Path to the directory that contains
                          data files that need to be loaded.

        Keyword Arguments
        output_path     - String. Path to the file in which processed
                          data needs to be stored, or None to not write
                          the data to file. Default = None
        task_name       - String. Name of the task that needs to be
                          present in the data files. The names of data
                          files are assumed to be in the format
                          "taskname_ppname_yyyy-mm-dd-HH-MM-SS".
                          Default = "Q1_Questions"
        """
        # Remember the task name.
        self._task_name = task_name
        # Load and process all data.
        self.load_from_directory(data_dir, task_name)
        self.process_raw_data()
        if output_path is not None:
            self.write_processed_data_to_file(output_path)

    def load_from_file(self, file_path, delimiter=",", missing=None,
                       auto_typing=True, string_vars=None):
        """Loads data from a single file. This function overwrites the
        parent's load_from_file function to keep the "Response" column as
        strings (needed for the multi-part answers).

        Arguments
        file_path   - String. Path to the file that needs to be loaded.

        Keyword arguments
        delimiter   - String. Delimiter for the data file. Default = ","
        missing     - List. Values that code for missing data (as strings,
                      since conversion happens before auto-typing), or
                      None. Default = None
        auto_typing - Bool. When True, variables are automatically
                      converted to float where possible. Default = True
        string_vars - List. Variables to keep as strings; defaults to
                      ["Response"] when None.

        Returns
        data        - Whatever PyRED.files.read_behaviour returns, or
                      None for an empty file.
        """
        # Bug fix: the original ignored every keyword argument and passed
        # hard-coded values to read_behaviour; forward them instead.
        if string_vars is None:
            string_vars = ["Response"]
        raw = read_behaviour(file_path, delimiter=delimiter, missing=missing,
                             auto_typing=auto_typing, string_vars=string_vars)
        # If the file is empty, return None.
        if raw is None:
            return None
        return raw

    def process_raw_data(self):
        """Computes the variables that need to be computed from this task,
        and stores them in the self.data dict. This has one key for every
        variable of interest, each pointing to a NumPy array with shape
        (N,) where N is the number of participants.

        The processed data comes from the self.raw dict, so make sure that
        self.load_from_directory is run before this function is.
        """
        # Get all participant names, or return straight away if no data
        # was loaded yet.
        if hasattr(self, "raw"):
            # Bug fix: dict.keys() has no .sort() in Python 3; sorted()
            # produces the same ordered participant list.
            participants = sorted(self.raw.keys())
        else:
            self.data = None
            return
        # Count the number of participants.
        n = len(participants)
        # Find out how many questions there were.
        n_questions = 0
        for i, ppname in enumerate(participants):
            if self.raw[ppname] is None:
                continue
            if len(self.raw[ppname]["QuestionNumber"]) > n_questions:
                n_questions = len(self.raw[ppname]["QuestionNumber"])
        # Variables of interest: one response and one RT per question.
        vor = []
        for i in range(n_questions):
            vor.append("Q%d_resp" % (i + 1))
            vor.append("Q%d_RT" % (i + 1))
        # Create a data entry for each variable of interest, initialised
        # to NaN (numpy.nan works on both NumPy 1.x and 2.x).
        self.data = {}
        self.data["ppname"] = []
        for var in vor:
            self.data[var] = numpy.zeros(n, dtype=float) * numpy.nan
        # Loop through all participants.
        for i, ppname in enumerate(participants):
            # Add the participant name.
            self.data["ppname"].append(copy.deepcopy(ppname))
            # Skip empty datasets.
            if self.raw[ppname] is None:
                continue
            # Compute stuff relevant to this task.
            for j, qnr in enumerate(self.raw[ppname]["QuestionNumber"]):
                # Split Questionnaire 3, Q13 and Q14 into sub-questions.
                if "Q3" in self._task_name and int(qnr) in [13, 14]:
                    # These questions split into two parts: a Boolean
                    # response per sub-part, then a description of each
                    # sub-part, e.g.
                    # "1_1_0//Television_Tablet(likeanIPad)_Noneofthese"
                    bool_resp, descr = \
                        self.raw[ppname]["Response"][j].split("//")
                    # Bug fix: map() is lazy in Python 3 and cannot be
                    # indexed below; materialise it as a list of ints.
                    bool_resp = [int(b) for b in bool_resp.split("_")]
                    descr = descr.split("_")
                    # Store the data in the dict.
                    for k, item in enumerate(descr):
                        # Strip parenthesised clarifications from the name.
                        if "(" in item:
                            item = item[:item.find("(")]
                        var = "Q%s_%s_resp" % (int(qnr), item)
                        # Create a new entry for this variable if one
                        # doesn't exist yet.
                        if var not in self.data.keys():
                            self.data[var] = \
                                numpy.zeros(n, dtype=float) * numpy.nan
                        self.data[var][i] = bool_resp[k]
                    # Store response time for the whole item.
                    self.data["Q%s_RT" % (int(qnr))][i] = \
                        float(self.raw[ppname]["TimeEndQuestion"][j]) \
                        - float(self.raw[ppname]["TimeStartQuestion"][j])
                # All other questions are one-question one-response:
                else:
                    self.data["Q%s_resp" % (int(qnr))][i] = \
                        float(self.raw[ppname]["Response"][j])
                    self.data["Q%s_RT" % (int(qnr))][i] = \
                        self.raw[ppname]["TimeEndQuestion"][j] \
                        - self.raw[ppname]["TimeStartQuestion"][j]
|
14,307 | e6444c10192ebc71a809213bcff0e045c63632d6 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
import input
devices = input.get_devices()
show_all = True
window = pyglet.window.Window(1024, 768)
batch = pyglet.graphics.Batch()
class TrackedElement(object):
    """Pairs an input element with an on-screen label showing its value.

    NOTE(review): relies on the module-level ``x``/``y`` layout cursors at
    construction time and on the module-level ``batch`` for rendering.
    """

    def __init__(self, element):
        self.element = element
        self.label = pyglet.text.Label(element.name,
                                       font_size=8,
                                       x=x, y=y, anchor_y='top', batch=batch)

    def update(self):
        # Refresh the label with the element's current value.
        self.label.text = '%s: %s' % (self.element.name,
                                      self.element.get_value())
# Lay the devices out in columns and build a tracked label per element.
x = 0
tracked_elements = []
for device in devices:
    y = window.height
    label = pyglet.text.Label(device.name or '', x=x, y=y, anchor_y='top',
                              batch=batch)
    y -= label.content_height
    try:
        device.open()
        for element in device.elements:
            if not show_all and not element.known:
                continue
            tracked_element = TrackedElement(element)
            tracked_elements.append(tracked_element)
            y -= tracked_element.label.content_height
            # Stop once the column runs off the bottom of the window.
            if y < 0:
                break
    except input.InputDeviceExclusiveException:
        # Another application has exclusive access to this device.
        msg = '(Device is exclusive)'
        label = pyglet.text.Label(msg, x=x, y=y, anchor_y='top', batch=batch)
        y -= label.content_height
    x += window.width / len(devices)
@window.event
def on_draw():
    # Redraw every label each frame.
    window.clear()
    batch.draw()
def update(dt):
    """Refresh every tracked label with its element's current value."""
    for tracked_element in tracked_elements:
        tracked_element.update()


pyglet.clock.schedule(update)
pyglet.app.run()

# The app has exited: release the devices we opened.
for device in devices:
    device.close()
|
14,308 | 4ca6119a243a3727d9582fc650eaf7f8067b9079 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to provide a hook for staging.
Some App Engine runtimes require an additional staging step before deployment
(e.g. when deploying compiled artifacts, or vendoring code that normally lives
outside of the app directory). This module contains (1) a registry mapping
runtime/environment combinations to staging commands, and (2) code to run said
commands.
The interface is defined as follows:
- A staging command is an executable (binary or script) that takes two
positional parameters: the path of the `<service>.yaml` in the directory
containing the unstaged application code, and the path of an empty directory
in which to stage the application code.
- On success, the STDOUT and STDERR of the staging command are logged at the
INFO level. On failure, a StagingCommandFailedError is raised containing the
STDOUT and STDERR of the staging command (which are surfaced to the user as an
ERROR message).
"""
import cStringIO
import os
import tempfile
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_JAVA_APPCFG_ENTRY_POINT = 'com.google.appengine.tools.admin.AppCfg'
_JAVA_APPCFG_STAGE_FLAGS = [
'--enable_jar_splitting',
'--enable_jar_classes']
_STAGING_COMMAND_OUTPUT_TEMPLATE = """\
------------------------------------ STDOUT ------------------------------------
{out}\
------------------------------------ STDERR ------------------------------------
{err}\
--------------------------------------------------------------------------------
"""
class NoSdkRootError(exceptions.Error):
  """Raised when the Cloud SDK installation root cannot be located."""

  def __init__(self):
    super(NoSdkRootError, self).__init__(
        'No SDK root could be found. Please check your installation.')
class StagingCommandFailedError(exceptions.Error):
  """Raised when a staging command exits with a non-zero return code."""

  def __init__(self, args, return_code, output_message):
    super(StagingCommandFailedError, self).__init__(
        'Staging command [{0}] failed with return code [{1}].\n\n{2}'.format(
            ' '.join(args), return_code, output_message))
def _StagingProtocolMapper(command_path, descriptor, app_dir, staging_dir):
return [command_path, descriptor, app_dir, staging_dir]
def _JavaStagingMapper(command_path, descriptor, app_dir, staging_dir):
  """Map a java staging request to the right args.

  Args:
    command_path: str, path to the jar tool file.
    descriptor: str, path to the `appengine-web.xml`
    app_dir: str, path to the unstaged app directory
    staging_dir: str, path to the empty staging dir

  Raises:
    java.JavaError, if Java is not installed.

  Returns:
    [str], args for executable invocation.
  """
  del descriptor  # Unused, app_dir is sufficient
  java.CheckIfJavaIsInstalled('local staging for java')
  java_bin = files.FindExecutableOnPath('java')
  return ([java_bin, '-classpath', command_path, _JAVA_APPCFG_ENTRY_POINT] +
          _JAVA_APPCFG_STAGE_FLAGS + ['stage', app_dir, staging_dir])
class _Command(object):
"""Represents a cross-platform command.
Paths are relative to the Cloud SDK Root directory.
Attributes:
nix_path: str, the path to the executable on Linux and OS X
windows_path: str, the path to the executable on Windows
component: str or None, the name of the Cloud SDK component which contains
the executable
mapper: fn or None, function that maps a staging invocation to a command.
"""
def __init__(self, nix_path, windows_path, component=None, mapper=None):
self.nix_path = nix_path
self.windows_path = windows_path
self.component = component
self.mapper = mapper or _StagingProtocolMapper
@property
def name(self):
if platforms.OperatingSystem.Current() is platforms.OperatingSystem.WINDOWS:
return self.windows_path
else:
return self.nix_path
def GetPath(self):
"""Returns the path to the command.
Returns:
str, the path to the command
Raises:
NoSdkRootError: if no Cloud SDK root could be found (and therefore the
command is not installed).
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise NoSdkRootError()
return os.path.join(sdk_root, self.name)
def EnsureInstalled(self):
if self.component is None:
return
msg = ('The component [{component}] is required for staging this '
'application.').format(component=self.component)
update_manager.UpdateManager.EnsureInstalledAndRestart([self.component],
msg=msg)
def Run(self, staging_area, descriptor, app_dir):
"""Invokes a staging command with a given <service>.yaml and temp dir.
Args:
staging_area: str, path to the staging area.
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
Returns:
str, the path to the staged directory.
Raises:
StagingCommandFailedError: if the staging command process exited non-zero.
"""
staging_dir = tempfile.mkdtemp(dir=staging_area)
args = self.mapper(self.GetPath(), descriptor, app_dir, staging_dir)
log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
out = cStringIO.StringIO()
err = cStringIO.StringIO()
return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
err_func=err.write)
message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
err=err.getvalue())
log.info(message)
if return_code:
raise StagingCommandFailedError(args, return_code, message)
return staging_dir
# Path to the go-app-stager binary
_GO_APP_STAGER_DIR = os.path.join('platform', 'google_appengine')
# Path to the jar which contains the staging command
_APPENGINE_TOOLS_JAR = os.path.join(
'platform', 'google_appengine', 'google', 'appengine', 'tools', 'java',
'lib', 'appengine-tools-api.jar')
# STAGING_REGISTRY is a map of (runtime, app-engine-environment) to executable
# path relative to Cloud SDK Root; it should look something like the following:
#
# from googlecloudsdk.api_lib.app import util
# STAGING_REGISTRY = {
# ('intercal', util.Environment.FLEX):
# _Command(
# os.path.join('command_dir', 'stage-intercal-flex.sh'),
# os.path.join('command_dir', 'stage-intercal-flex.exe'),
# component='app-engine-intercal'),
# ('x86-asm', util.Environment.STANDARD):
# _Command(
# os.path.join('command_dir', 'stage-x86-asm-standard'),
# os.path.join('command_dir', 'stage-x86-asm-standard.exe'),
# component='app-engine-intercal'),
# }
_STAGING_REGISTRY = {
('go', util.Environment.STANDARD):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.MANAGED_VMS):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.FLEX):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
}
# _STAGING_REGISTRY_BETA extends _STAGING_REGISTRY, overriding entries if the
# same key is used.
_STAGING_REGISTRY_BETA = {
('java-xml', util.Environment.STANDARD):
_Command(
_APPENGINE_TOOLS_JAR,
_APPENGINE_TOOLS_JAR,
component='app-engine-java',
mapper=_JavaStagingMapper)
}
class Stager(object):
  """Stages deployables via a (runtime, environment) -> command registry."""

  def __init__(self, registry, staging_area):
    self.registry = registry
    self.staging_area = staging_area

  def Stage(self, descriptor, app_dir, runtime, environment):
    """Stage the given deployable or do nothing if N/A.

    Args:
      descriptor: str, path to the unstaged <service>.yaml or
        appengine-web.xml
      app_dir: str, path to the unstaged app directory
      runtime: str, the name of the runtime for the application to stage
      environment: api_lib.app.util.Environment, the environment for the
        application to stage

    Returns:
      str, the path to the staged directory or None if no corresponding
      staging command was found.

    Raises:
      NoSdkRootError: if no Cloud SDK installation root could be found.
      StagingCommandFailedError: if the staging command process exited
        non-zero.
    """
    command = self.registry.get((runtime, environment))
    if not command:
      # Many runtimes do not require a staging step; this isn't a problem.
      log.debug(('No staging command found for runtime [%s] and environment '
                 '[%s].'), runtime, environment.name)
      return
    command.EnsureInstalled()
    return command.Run(self.staging_area, descriptor, app_dir)
def GetStager(staging_area):
  """Get the default stager."""
  return Stager(_STAGING_REGISTRY, staging_area)
def GetBetaStager(staging_area):
  """Get the beta stager, used for `gcloud beta *` commands."""
  # Beta entries override default ones when the same key is present.
  registry = _STAGING_REGISTRY.copy()
  registry.update(_STAGING_REGISTRY_BETA)
  return Stager(registry, staging_area)
def GetNoopStager(staging_area):
  """Return a stager whose registry is empty, so staging is always a no-op."""
  empty_registry = {}
  return Stager(empty_registry, staging_area)
|
14,309 | 38e4c0ec6c162fb0f006421c9546eec7bb54d269 | '''
You are given two arrays (without duplicates) nums1 and nums2 where nums1’s elements are subset of nums2. Find all the next greater numbers for nums1's elements in the corresponding places of nums2.
The Next Greater Number of a number x in nums1 is the first greater number to its right in nums2. If it does not exist, output -1 for this number.
Example
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2].
Output: [-1,3,-1]
Explanation:
For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.
For number 1 in the first array, the next greater number for it in the second array is 3.
For number 2 in the first array, there is no next greater number for it in the second array, so output -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4].
Output: [3,-1]
Explanation:
For number 2 in the first array, the next greater number for it in the second array is 3.
For number 4 in the first array, there is no next greater number for it in the second array, so output -1.
'''
class Solution:
    """
    @param nums1: an array
    @param nums2: an array
    @return: find all the next greater numbers for nums1's elements in the corresponding places of nums2
    """

    # BUG FIX: the original file defined two methods both named
    # `nextGreaterElement`, so this brute-force version was dead code,
    # silently shadowed by the second definition.  It is kept under a
    # distinct name; callers still get the monotone-stack solution via the
    # original `nextGreaterElement` name.
    def nextGreaterElementBruteForce(self, nums1, nums2):
        """O(n*m) reference solution.

        For each element of nums1, scan nums2: once the element itself is
        located (tracked by `found`), the first later value greater than it
        is the answer.  The for/else idiom appends -1 when the inner loop
        finishes without a break (no greater element to the right).
        """
        if not nums1 or not nums2:
            return []
        result = []
        for i in range(len(nums1)):
            # Becomes True once nums1[i] has been located inside nums2.
            found = False
            for j in range(len(nums2)):
                if found and nums2[j] > nums1[i]:
                    result.append(nums2[j])
                    break
                if nums2[j] == nums1[i]:
                    found = True
            else:
                # Inner loop completed with no break: no greater element.
                result.append(-1)
        return result

    def nextGreaterElement(self, nums1, nums2):
        """O(n + m) monotone-stack solution.

        Scan nums2 right-to-left keeping a stack that is strictly decreasing
        from top to bottom; after popping all values not greater than the
        current one, the remaining top (if any) is the next greater element.
        Results are recorded per value (the problem guarantees no duplicates)
        and then looked up for each element of nums1.
        """
        if not nums1 or not nums2:
            return []
        monoStack = []
        # value -> first greater value to its right in nums2 (or -1).
        eleGreaterMap = {}
        result = []
        for i in range(len(nums2) - 1, -1, -1):
            # Restore monotonicity: drop candidates <= the current value.
            while monoStack and nums2[i] > monoStack[-1]:
                monoStack.pop()
            eleGreaterMap[nums2[i]] = monoStack[-1] if monoStack else -1
            monoStack.append(nums2[i])
        for ele in nums1:
            result.append(eleGreaterMap[ele])
        return result
|
14,310 | 168895b37206e2e37b2372ce933e6156df6472c5 | # License: BSD 3 clause
import unittest
from itertools import product
import numpy as np
from tick.base.inference import InferenceTest
from tick.linear_model import SimuPoisReg, PoissonRegression
from tick.simulation import weights_sparse_gauss
class Test(InferenceTest):
    """Unit tests for the PoissonRegression learner: running/fitting with all
    solver-penalty combinations, and the getter/setter plumbing between the
    learner and its underlying solver, model, and prox objects."""

    def setUp(self):
        # Arbitrary scalar fixtures reused by the settings tests below.
        self.float_1 = 5.23e-4
        self.float_2 = 3.86e-2
        self.int_1 = 3198
        self.int_2 = 230
        self.X = np.zeros((5, 5))

    @staticmethod
    def get_train_data(n_samples=2000, n_features=20, fit_intercept=True):
        # Simulate a Poisson regression dataset from a sparse Gaussian
        # ground-truth weight vector; seeds are fixed for reproducibility.
        np.random.seed(123)
        weights0 = weights_sparse_gauss(n_features, nnz=2)
        if fit_intercept:
            intercept0 = 1.
        else:
            intercept0 = None
        X, y = SimuPoisReg(weights0, intercept0, n_samples=n_samples, seed=123,
                           verbose=False).simulate()
        return X, y, weights0, intercept0

    def test_PoissonRegression_run(self):
        """...Test PoissonRegression runs with different solvers and penalties
        """
        n_samples = 200
        n_features = 10
        for fit_intercept in [False, True]:
            X, y, weights0, intercept0 = self.get_train_data(
                n_samples=n_samples, n_features=n_features,
                fit_intercept=fit_intercept)
            for solver, penalty in product(PoissonRegression._solvers,
                                           PoissonRegression._penalties):
                # NOTE(review): bfgs is restricted to smooth penalties here;
                # 'zero' does not appear in _penalties elsewhere in this file
                # ('none' does) -- confirm the intended key.
                if solver == 'bfgs' and (penalty not in ['zero', 'l2']):
                    continue
                if penalty == 'binarsity':
                    # binarsity requires the block structure arguments.
                    learner = PoissonRegression(
                        verbose=False, fit_intercept=fit_intercept,
                        solver=solver, penalty=penalty, max_iter=1, step=1e-5,
                        blocks_start=[0], blocks_length=[1])
                else:
                    learner = PoissonRegression(
                        verbose=False, fit_intercept=fit_intercept,
                        solver=solver, penalty=penalty, max_iter=1, step=1e-5)
                learner.fit(X, y)
                self.assertTrue(np.isfinite(learner.weights).all())
                if fit_intercept:
                    self.assertTrue(np.isfinite(learner.intercept))

    def test_PoissonRegression_fit(self):
        """...Test PoissonRegression fit with default parameters
        """
        n_samples = 2000
        n_features = 20
        for fit_intercept in [False, True]:
            X, y, weights0, intercept0 = self.get_train_data(
                n_samples=n_samples, n_features=n_features,
                fit_intercept=fit_intercept)
            learner = PoissonRegression(C=1e3, verbose=False,
                                        fit_intercept=fit_intercept,
                                        solver='bfgs')
            learner.fit(X, y)
            # Recovered weights should be close to the simulated ground truth.
            err = np.linalg.norm(learner.weights - weights0) / n_features
            self.assertLess(err, 1e-2)
            if fit_intercept:
                self.assertLess(np.abs(learner.intercept - intercept0), 1e-1)

    def test_PoissonRegression_settings(self):
        """...Test PoissonRegression basic settings
        """
        # solver
        from tick.solver import AGD, GD, BFGS, SGD, SVRG, SDCA
        solvers = {
            'AGD': AGD,
            'BFGS': BFGS,
            'GD': GD,
            'SGD': SGD,
            'SVRG': SVRG,
            'SDCA': SDCA
        }
        # Each solver name must instantiate the matching solver class.
        solver_class_map = PoissonRegression._solvers
        for solver in PoissonRegression._solvers.keys():
            learner = PoissonRegression(solver=solver)
            solver_class = solvers[solver_class_map[solver]]
            self.assertTrue(isinstance(learner._solver_obj, solver_class))
        msg = '^``solver`` must be one of agd, bfgs, gd, sgd, svrg, ' \
              'got wrong_name$'
        with self.assertRaisesRegex(ValueError, msg):
            PoissonRegression(solver='wrong_name')
        # Each penalty name must instantiate the matching prox class.
        prox_class_map = PoissonRegression._penalties
        for penalty in PoissonRegression._penalties.keys():
            if penalty == 'binarsity':
                learner = PoissonRegression(penalty=penalty, blocks_start=[0],
                                            blocks_length=[1])
            else:
                learner = PoissonRegression(penalty=penalty)
            prox_class = prox_class_map[penalty]
            self.assertTrue(isinstance(learner._prox_obj, prox_class))
        msg = '^``penalty`` must be one of binarsity, elasticnet, l1, l2, ' \
              'none, tv, got wrong_name$'
        with self.assertRaisesRegex(ValueError, msg):
            PoissonRegression(penalty='wrong_name')

    def test_PoissonRegression_model_settings(self):
        """...Test LogisticRegression setting of parameters of model
        """
        # fit_intercept must propagate to the underlying model object,
        # both at construction time and via the property setter.
        for solver in PoissonRegression._solvers.keys():
            learner = PoissonRegression(fit_intercept=True, solver=solver)
            self.assertEqual(learner.fit_intercept, True)
            self.assertEqual(learner._model_obj.fit_intercept, True)
            learner.fit_intercept = False
            self.assertEqual(learner.fit_intercept, False)
            self.assertEqual(learner._model_obj.fit_intercept, False)
            learner = PoissonRegression(fit_intercept=False, solver=solver)
            self.assertEqual(learner.fit_intercept, False)
            self.assertEqual(learner._model_obj.fit_intercept, False)
            learner.fit_intercept = True
            self.assertEqual(learner.fit_intercept, True)
            self.assertEqual(learner._model_obj.fit_intercept, True)

    def test_PoissonRegression_penalty_C(self):
        """...Test PoissonRegression setting of parameter of C
        """
        for penalty in PoissonRegression._penalties.keys():
            if penalty != 'none':
                # C maps to prox strength as 1/C.
                if penalty == 'binarsity':
                    learner = PoissonRegression(
                        penalty=penalty, C=self.float_1, blocks_start=[0],
                        blocks_length=[1])
                else:
                    learner = PoissonRegression(penalty=penalty,
                                                C=self.float_1)
                self.assertEqual(learner.C, self.float_1)
                self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)
                learner.C = self.float_2
                self.assertEqual(learner.C, self.float_2)
                self.assertEqual(learner._prox_obj.strength, 1. / self.float_2)
                msg = '^``C`` must be positive, got -1$'
                with self.assertRaisesRegex(ValueError, msg):
                    if penalty == 'binarsity':
                        PoissonRegression(penalty=penalty, C=-1,
                                          blocks_start=[0], blocks_length=[1])
                    else:
                        PoissonRegression(penalty=penalty, C=-1)
            else:
                # Unpenalized learner: setting C must warn, not apply.
                msg = '^You cannot set C for penalty "%s"$' % penalty
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    PoissonRegression(penalty=penalty, C=self.float_1)
                learner = PoissonRegression(penalty=penalty)
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner.C = self.float_1
                msg = '^``C`` must be positive, got -2$'
                with self.assertRaisesRegex(ValueError, msg):
                    learner.C = -2

    def test_PoissonRegression_penalty_elastic_net_ratio(self):
        """...Test PoissonRegression setting of parameter of elastic_net_ratio
        """
        ratio_1 = 0.6
        ratio_2 = 0.3
        for penalty in PoissonRegression._penalties.keys():
            if penalty == 'elasticnet':
                learner = PoissonRegression(penalty=penalty, C=self.float_1,
                                            elastic_net_ratio=ratio_1)
                self.assertEqual(learner.C, self.float_1)
                self.assertEqual(learner.elastic_net_ratio, ratio_1)
                self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)
                self.assertEqual(learner._prox_obj.ratio, ratio_1)
                learner.elastic_net_ratio = ratio_2
                self.assertEqual(learner.C, self.float_1)
                self.assertEqual(learner.elastic_net_ratio, ratio_2)
                self.assertEqual(learner._prox_obj.ratio, ratio_2)
            else:
                # Only elasticnet has a ratio; others must warn.
                # NOTE(review): pattern ends with '$$' (doubled) -- harmless
                # to re.search but likely a typo; confirm before tightening.
                msg = '^Penalty "%s" has no elastic_net_ratio attribute$$' % \
                      penalty
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    if penalty == 'binarsity':
                        PoissonRegression(penalty=penalty,
                                          elastic_net_ratio=0.8,
                                          blocks_start=[0], blocks_length=[1])
                    else:
                        PoissonRegression(penalty=penalty,
                                          elastic_net_ratio=0.8)
                if penalty == 'binarsity':
                    learner = PoissonRegression(
                        penalty=penalty, blocks_start=[0], blocks_length=[1])
                else:
                    learner = PoissonRegression(penalty=penalty)
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner.elastic_net_ratio = ratio_1

    def test_PoissonRegression_solver_basic_settings(self):
        """...Test LogisticRegression setting of basic parameters of solver
        """
        # tol / max_iter / verbose / print_every / record_every must all
        # propagate to the underlying solver object.
        for solver in PoissonRegression._solvers.keys():
            # tol
            learner = PoissonRegression(solver=solver, tol=self.float_1)
            self.assertEqual(learner.tol, self.float_1)
            self.assertEqual(learner._solver_obj.tol, self.float_1)
            learner.tol = self.float_2
            self.assertEqual(learner.tol, self.float_2)
            self.assertEqual(learner._solver_obj.tol, self.float_2)
            # max_iter
            learner = PoissonRegression(solver=solver, max_iter=self.int_1)
            self.assertEqual(learner.max_iter, self.int_1)
            self.assertEqual(learner._solver_obj.max_iter, self.int_1)
            learner.max_iter = self.int_2
            self.assertEqual(learner.max_iter, self.int_2)
            self.assertEqual(learner._solver_obj.max_iter, self.int_2)
            # verbose
            learner = PoissonRegression(solver=solver, verbose=True)
            self.assertEqual(learner.verbose, True)
            self.assertEqual(learner._solver_obj.verbose, True)
            learner.verbose = False
            self.assertEqual(learner.verbose, False)
            self.assertEqual(learner._solver_obj.verbose, False)
            learner = PoissonRegression(solver=solver, verbose=False)
            self.assertEqual(learner.verbose, False)
            self.assertEqual(learner._solver_obj.verbose, False)
            learner.verbose = True
            self.assertEqual(learner.verbose, True)
            self.assertEqual(learner._solver_obj.verbose, True)
            # print_every
            learner = PoissonRegression(solver=solver, print_every=self.int_1)
            self.assertEqual(learner.print_every, self.int_1)
            self.assertEqual(learner._solver_obj.print_every, self.int_1)
            learner.print_every = self.int_2
            self.assertEqual(learner.print_every, self.int_2)
            self.assertEqual(learner._solver_obj.print_every, self.int_2)
            # record_every
            learner = PoissonRegression(solver=solver, record_every=self.int_1)
            self.assertEqual(learner.record_every, self.int_1)
            self.assertEqual(learner._solver_obj.record_every, self.int_1)
            learner.record_every = self.int_2
            self.assertEqual(learner.record_every, self.int_2)
            self.assertEqual(learner._solver_obj.record_every, self.int_2)

    def test_PoissonRegression_solver_step(self):
        """...Test LogisticRegression setting of step parameter of solver
        """
        for solver in PoissonRegression._solvers.keys():
            if solver == 'bfgs':
                # bfgs computes its own step; setting one must warn.
                learner = PoissonRegression(solver=solver)
                self.assertIsNone(learner.step)
                learner = PoissonRegression(solver=solver, step=self.float_1)
                self.assertIsNone(learner.step)
                msg = '^Solver "bfgs" has no settable step$'
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner.step = self.float_2
                self.assertIsNone(learner.step)
            else:
                learner = PoissonRegression(solver=solver, step=self.float_1)
                self.assertEqual(learner.step, self.float_1)
                self.assertEqual(learner._solver_obj.step, self.float_1)
                learner.step = self.float_2
                self.assertEqual(learner.step, self.float_2)
                self.assertEqual(learner._solver_obj.step, self.float_2)

    def test_PoissonRegression_solver_random_state(self):
        """...Test PoissonRegression setting of random_state parameter of solver
        """
        for solver in PoissonRegression._solvers.keys():
            if solver in ['agd', 'gd', 'bfgs']:
                # Deterministic solvers have no seed; setting one must warn.
                msg = '^Solver "%s" has no settable random_state$' % solver
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner = PoissonRegression(solver=solver, random_state=1)
                    self.assertIsNone(learner.random_state)
            else:
                learner = PoissonRegression(solver=solver,
                                            random_state=self.int_1)
                self.assertEqual(learner.random_state, self.int_1)
                self.assertEqual(learner._solver_obj.seed, self.int_1)
                msg = '^random_state must be positive, got -1$'
                with self.assertRaisesRegex(ValueError, msg):
                    PoissonRegression(solver=solver, random_state=-1)
            # random_state may only be set at construction time.
            msg = '^random_state is readonly in PoissonRegression$'
            with self.assertRaisesRegex(AttributeError, msg):
                learner = PoissonRegression(solver=solver)
                learner.random_state = self.int_2

    def test_safe_array_cast(self):
        """...Test error and warnings raised by LogLearner constructor
        """
        # Casting to float and making C-contiguous both emit copy warnings.
        msg = '^Copying array of size \(5, 5\) to convert it in the ' \
              'right format$'
        with self.assertWarnsRegex(RuntimeWarning, msg):
            PoissonRegression._safe_array(self.X.astype(int))
        msg = '^Copying array of size \(3, 5\) to create a ' \
              'C-contiguous version of it$'
        with self.assertWarnsRegex(RuntimeWarning, msg):
            PoissonRegression._safe_array(self.X[::2])
        np.testing.assert_array_equal(self.X,
                                      PoissonRegression._safe_array(self.X))

    @unittest.skip("has mismatch on newer pythons")
    def test_predict(self):
        """...Test PoissonRegression predict
        """
        X_train, y_train, _, _ = self.get_train_data(n_samples=200,
                                                     n_features=12)
        learner = PoissonRegression(random_state=32789, tol=1e-9)
        learner.fit(X_train, y_train)
        X_test, y_test, _, _ = self.get_train_data(n_samples=5, n_features=12)
        y_pred = np.array([1., 5., 0., 5., 6.])
        np.testing.assert_array_almost_equal(learner.predict(X_test), y_pred)

    @unittest.skip("has mismatch on newer pythons")
    def test_decision_function(self):
        """...Test PoissonRegression decision function
        """
        X_train, y_train, _, _ = self.get_train_data(n_samples=200,
                                                     n_features=12)
        learner = PoissonRegression(random_state=32789, tol=1e-9)
        learner.fit(X_train, y_train)
        X_test, y_test, _, _ = self.get_train_data(n_samples=5, n_features=12)
        y_pred = np.array([1.1448, 5.2194, 0.2624, 4.5525, 6.4168])
        np.testing.assert_array_almost_equal(
            learner.decision_function(X_test), y_pred, decimal=4)

    @unittest.skip("has mismatch on newer pythons")
    def test_loglik(self):
        """...Test PoissonRegression loglik function
        """
        X_train, y_train, _, _ = self.get_train_data(n_samples=200,
                                                     n_features=12)
        learner = PoissonRegression(random_state=32789, tol=1e-9)
        learner.fit(X_train, y_train)
        X_test, y_test, _, _ = self.get_train_data(n_samples=5, n_features=12)
        np.testing.assert_array_almost_equal(
            learner.loglik(X_test, y_test), 1.8254, decimal=4)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
14,311 | 8ff04450d415e796640a38ad7f7f207bb5a40b01 | import random
def stunned():
    """Print a random 'stunned' flavor message and return the hit value (0)."""
    messages = (
        "Did You See Those Stars?",
        "Whhhhaaaaaa!!!!",
        "Why Is There Two Of Em Now!?!",
        "Check Please.",
        "Eeny, Meeny, Miny, Whoaaa!!",
        "Is There Doctor In The House!",
    )
    print(random.choice(messages))
    # A stunned combatant deals no damage this turn.
    return 0
# Dispatch table: maps a status-effect name to the handler that applies it.
stat_switch={
    "stun":stunned
}
|
14,312 | 2340eb3fdacc3b40d416d34d5ccb8f1176407254 | # -*- coding:utf-8 -*-
# author yuzuo.yz 2017/7/26 20:38
# dict: demo of building dictionaries (Python 2 print-statement syntax).
# Build a dict with the dict() keyword-argument constructor.
jsonObj = dict(name="kaka", age=10, title="player")
# Build a dict with a JSON-like literal instead of the constructor.
dict2 = {
    'name': 'messi',
    'age': 22
}
print jsonObj
print dict2
print type(jsonObj)
print dir(jsonObj)
print type(dict2)
print dir(dict2)
# An empty dict can be grown by key assignment.
a = dict()
a["a"] = "b"
print a
# Literal-built dicts accept key assignment the same way.
a2 = {"name" : "juliy"}
a2["age"] = 22
print a2
print type(a2)
print dir(a2)
|
14,313 | 82dc8b6ac8a28e505a9f3265bdf91db39c4bffdb | from __future__ import print_function
import os
import yaml
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
def parser(yml_file=None):
    """Parse command-line options for an ntuplizer cmsRun job.

    Registers the job options with FWCore's VarParsing, fills missing
    inputFiles/globalTag/triggerInfo from the version- and year-specific
    ``default_info.yml`` under $CMSSW_BASE, prints a summary, and returns
    the populated VarParsing object.

    Args:
        yml_file: accepted for backward compatibility but currently unused;
            defaults are always read from ``default_info.yml``.

    Returns:
        VarParsing.VarParsing: the parsed options object.
    """
    cmssw_base = os.getenv("CMSSW_BASE")
    # command line options parsing
    options = VarParsing.VarParsing()
    options.register('maxEvents',
                     100,
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.int,
                     "maximum number of events")
    options.register('globalTag',
                     '',
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.string,
                     "condition global tag for the job (\"auto:run2_data\" is default)")
    options.register('year',
                     2017,
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.int,
                     "year of data taking")
    options.register('type',
                     'data',
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.string,
                     "data or mc")
    options.register('xsection',
                     -1.,
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.float,
                     "MC cross section")
    options.register('triggerInfo',
                     '',
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.string,
                     "Trigger info")
    options.register('outputFile',
                     "ntuple.root",
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.string,
                     "name for the output root file (\"ntuple.root\" is default)")
    options.register('inputFiles',
                     '',
                     VarParsing.VarParsing.multiplicity.list,
                     VarParsing.VarParsing.varType.string,
                     "files to process")
    options.register('json',
                     '',
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.string,
                     "JSON file (do not use with CRAB!)")
    # NOTE(review): default '' for an int-typed option relies on VarParsing
    # coercion -- confirm a version is always passed on the command line.
    options.register('version',
                     '',
                     VarParsing.VarParsing.multiplicity.singleton,
                     VarParsing.VarParsing.varType.int,
                     "ntuple production version")
    options.parseArguments()

    ## default_info.yml: per-type defaults (input files, global tag) for the
    ## requested year/version.
    input_files = []
    global_tag = ''
    def_info = cmssw_base+'/src/Analysis/Ntuplizer/data/ntuples/'+str(options.year)+'/v'+str(options.version)+'/default_info.yml'
    with open(def_info) as f:
        def_info_data = yaml.safe_load(f)
    for dttype, info in def_info_data.items():
        if dttype != options.type:
            continue
        input_files = info['input_files']
        global_tag = info['global_tag'][0]
    trigger_info = cmssw_base+'/src/Analysis/Ntuplizer/data/ntuples/'+str(options.year)+'/v'+str(options.version)+'/trigger_info.yml'

    # set defaults only where the user supplied nothing on the command line
    if not options.inputFiles:
        options.setDefault('inputFiles', input_files)
    if not options.globalTag:
        options.setDefault('globalTag', global_tag)
    if not options.triggerInfo:
        options.setDefault('triggerInfo', trigger_info)

    ## summary printout
    print('Python Configuration Options')
    print('----------------------------')
    print("version     : ", options.version)
    print("year        : ", options.year)
    print("type        : ", options.type)
    print("globalTag   : ", options.globalTag)
    print("triggerInfo : ", options.triggerInfo)
    print("inputFiles  : ", options.inputFiles)
    print("outputFile  : ", options.outputFile)
    print("maxEvents   : ", options.maxEvents)
    if options.type == 'mc':
        print("xsection    : ", options.xsection)
    if options.json:
        print("json        : ", options.json)
    print('----------------------------')
    # BUG FIX: the original ended with a bare `print`, which under
    # `from __future__ import print_function` is just a name reference and
    # does nothing; `print()` emits the intended trailing blank line.
    print()
    return options
|
14,314 | 3b7e06a5382f2b67cd961ac716943a0b2f1a6ee8 | #!/usr/bin/env python3
from problems import *
from vectors import *
import sys, os
# Evaluate word-analogy problems against a trained vector model.
# argv: [1] vectors file, [2] problems dir, [3] output dir,
#       [5]/[6] extra Vectors() parameters (order per Vectors' signature).
vector_content = ''
with open(sys.argv[1]) as vectors_file:
    vector_content = Vectors(vectors_file.readlines(), sys.argv[6], sys.argv[5])

for access in os.listdir(sys.argv[2]):
    if access.endswith('.txt'):
        print(access)
        with open(os.path.join(sys.argv[2], access), 'r') as problems_file:
            # correct = [number right, number attempted]
            correct = [0, 0]
            with open(os.path.join(sys.argv[3], access), 'w') as output:
                for line in problems_file.readlines():
                    correct[1] += 1
                    analogy = Problem(line.split(' '))
                    guess = vector_content.d_val(analogy.blind())
                    if analogy.quality_check(guess):
                        correct[0] += 1
                    output.write(str(guess))
        # BUG FIX: the original bound the output file handle to the name
        # `eval`, shadowing the builtin; renamed to eval_out.
        eval_path = sys.argv[3] + '/eval.txt'
        with open(eval_path, 'a') as eval_out:
            eval_out.write(access)
            # NOTE(review): this divides by the problem count -- an empty
            # problems file raises ZeroDivisionError; confirm inputs are
            # always non-empty.
            eval_out.write(f'\nACCURACY TOP1: {correct[0]/correct[1]}% ({correct[0]}/{correct[1]})\n')
|
14,315 | c2730f3a1460be9ab9758eb34a0bb0427daf0d8f | import sys
from collections import deque
def last_card(n):
    """Return the last remaining card among 1..n for the game:
    repeatedly discard the top card, then move the new top card to
    the bottom (BOJ 2164 'Card2')."""
    cards = deque(range(1, n + 1))
    while len(cards) > 1:
        cards.popleft()                 # throw the top card away
        cards.append(cards.popleft())   # move the new top card to the bottom
    return cards.pop()


# Guard the stdin read so importing this module does not block/crash;
# running it as a script behaves exactly as before.
if __name__ == '__main__':
    print(last_card(int(sys.stdin.readline())))
|
14,316 | 560a45acb8f20b8221f151582949bbd89c384adb | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Initial schema for the palestra app: talks (Palestra), speakers
    (Palestrante), tags with colored types, and talk videos."""

    # First migration: nothing to depend on.
    dependencies = [
    ]

    operations = [
        # Talk: slug-keyed, with free-form info text.
        migrations.CreateModel(
            name='Palestra',
            fields=[
                ('slug', models.SlugField(primary_key=True, serialize=False)),
                ('nome', models.CharField(max_length=256)),
                ('info', models.TextField(blank=True, verbose_name='informações')),
            ],
            options={
                'ordering': ('nome',),
            },
        ),
        # Speaker: slug-keyed, optional photo URL.
        migrations.CreateModel(
            name='Palestrante',
            fields=[
                ('slug', models.SlugField(primary_key=True, serialize=False)),
                ('nome', models.CharField(max_length=256)),
                ('foto', models.URLField(blank=True)),
                ('info', models.TextField(blank=True, verbose_name='informações')),
            ],
            options={
                'ordering': ('nome',),
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('slug', models.SlugField(primary_key=True, serialize=False)),
                ('nome', models.CharField(unique=True, max_length=32)),
            ],
            options={
                'ordering': ('nome',),
            },
        ),
        # Tag type carries a display color validated as a #RRGGBB hex string.
        migrations.CreateModel(
            name='TipoTag',
            fields=[
                ('slug', models.SlugField(primary_key=True, serialize=False)),
                ('nome', models.CharField(unique=True, max_length=32)),
                ('cor', models.CharField(validators=[django.core.validators.RegexValidator('^#[0-9A-Fa-f]{6}$')], default='#000000', max_length=7)),
            ],
            options={
                'ordering': ('nome',),
            },
        ),
        # Video: many videos per talk, unique by URL.
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('url', models.URLField(unique=True, verbose_name='URL')),
                ('palestra', models.ForeignKey(related_name='videos', to='palestra.Palestra')),
            ],
            options={
                'ordering': ('palestra', 'url'),
                'verbose_name': 'vídeo',
            },
        ),
        # Relations added after all models exist to avoid ordering issues.
        migrations.AddField(
            model_name='tag',
            name='tipo',
            field=models.ForeignKey(related_name='tags', to='palestra.TipoTag'),
        ),
        migrations.AddField(
            model_name='palestra',
            name='palestrantes',
            field=models.ManyToManyField(blank=True, related_name='palestras', to='palestra.Palestrante'),
        ),
        migrations.AddField(
            model_name='palestra',
            name='tags',
            field=models.ManyToManyField(blank=True, related_name='palestras', to='palestra.Tag'),
        ),
    ]
|
14,317 | 91aa366b108031148a292ee33859733bdf332582 | """Base functionality for parsers."""
from abc import ABC, abstractmethod
from typing import Any
class Parser(ABC):
    """Base parser class."""

    @abstractmethod
    def __call__(self, config: Any):
        """Parse *config* and return the parsed result."""
        raise NotImplementedError()


class ChainParser(Parser):
    """A parser that applies parsers sequentially."""

    def __init__(self, *parsers: Parser):
        self.parsers = parsers

    def __call__(self, config: Any):
        # Feed each stage the previous stage's output.
        result = config
        for stage in self.parsers:
            result = stage(result)
        return result

    def __iter__(self):
        yield from self.parsers

    def __repr__(self):
        inner = ", ".join(repr(stage) for stage in self.parsers)
        return f"{self.__class__.__name__}({inner})"
|
14,318 | adbe6cf635a0ec97b1620334c62e9f8381b3581d | import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from spikesorting_tsne.spikesorting_tsne import constants as ct
import pandas as pd
def _get_relevant_channels_with_threshold(threshold, template):
amplitude = np.nanmax(template) - np.nanmin(template)
points_over_threshold = np.argwhere(template > (np.nanmax(template) - threshold * amplitude))
channels_over_threshold = np.unique(points_over_threshold[:, 1])
return channels_over_threshold
def _get_relevant_channels_over_median_peaks(threshold, template):
median = np.median(np.nanmin(template, axis=0))
std = np.std(np.nanmin(template, axis=0))
points_under_median = np.argwhere(template < (median - threshold*std))
channels_over_threshold = np.unique(points_under_median[:, 1])
return channels_over_threshold
def _normalize(L, normalizeFrom=0, normalizeTo=1):
'''normalize values of a list to make its min = normalizeFrom and its max = normalizeTo'''
vMax = max(L)
vMin = min(L)
return [(x-vMin)*(normalizeTo - normalizeFrom) / (vMax - vMin) for x in L]
# ------------------
def generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,
                                       used_spikes_indices=None, position_mult=2.25, threshold=0.1):
    """
    Generate positions (x, y coordinates) for each spike on the probe. This function assumes that the spikes were
    generated with the kilosort algorithm so the base_folder holds all the necessary .npy arrays.

    In order for this function to find which channels are the most relevant in each spike it looks into the spike's
    assigned template (a channels x time points array in spike_templates.npy). It then finds the minimum points of all
    channels, takes their median and their standard deviation and for each channel creates the difference between the
    minimum and the median. Finally it demarcates the relevant to the template channels by keeping the ones whose
    difference is larger than a number of times (threshold) over the standard deviation.
    It then picks the relevant channels of the spike's raw data, finds the differences between the minimum value
    and the channel's time series median value (over time), orders the channels according to these differences and
    assigns weights between 0 and 1 (0 for a difference of 0, 1 for a maximum difference).
    It finally finds the x, y positions of the selected channels and adds to the position of the largest difference
    channel the weighted average positions of the remaining selected channels.

    :param base_folder: the folder name into which the kilosort result .npy arrays are
    :type base_folder: string
    :param binary_data_filename: the name of the binary file that holds the raw data originally passed to kilosort
    :type binary_data_filename: string
    :param number_of_channels_in_binary_file: how many channels the binary file has (this is different to the number
        of channels that are set to active in kilosort)
    :type number_of_channels_in_binary_file: int
    :param used_spikes_indices: which of the spikes found by kilosort should be considered
    :type used_spikes_indices: int[:]
    :param position_mult: multiplier applied to the computed positions
    :type position_mult: float
    :param threshold: the number of times the standard deviation should be larger than the difference between a
        channel's minimum and the median of the minima of all channels in order to demarcate the channel as
        relevant to the spike
    :type threshold: float
    :return: the position of each spike on the probe, the distance of each spike on the probe from the 0, 0 of the
        probe, the indices of the original ordering of the spikes sorted according to their distance on the probe,
        the distance of each spike on the probe sorted
    :rtype: float32[len(used_spike_indices), 2], float32[len(used_spike_indices)], int[len(used_spike_indices)],
        float32[len(used_spike_indices)]
    """
    # Load the required data from the kilosort folder
    channel_map = np.load(os.path.join(base_folder, 'channel_map.npy'))
    active_channel_map = np.squeeze(channel_map, axis=1)
    channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))
    spike_templates = np.load(os.path.join(base_folder, ct.SPIKE_TEMPLATES_FILENAME))
    templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))
    data_raw = np.memmap(os.path.join(base_folder, binary_data_filename),
                         dtype=np.int16, mode='r')
    number_of_timepoints_in_raw = int(data_raw.shape[0] / number_of_channels_in_binary_file)
    data_raw_kilosorted = np.reshape(data_raw, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the exact equivalent of the old alias.
    spike_times = np.squeeze(np.load(os.path.join(base_folder, ct.SPIKE_TIMES_FILENAME)).astype(int))
    # Half-window (in samples) of raw data taken around each spike time.
    time_points = 50
    if used_spikes_indices is None:
        used_spikes_indices = np.arange(0, len(spike_times))

    # Run the loop over all spikes to get the positions
    counter = 0
    weighted_average_postions = np.empty((len(used_spikes_indices), 2))
    spike_distance_on_probe = np.empty(len(used_spikes_indices))
    for spike_index in np.arange(0, len(used_spikes_indices)):
        spike_raw_data = data_raw_kilosorted[active_channel_map,
                                             (spike_times[used_spikes_indices[spike_index]]-time_points):
                                             (spike_times[used_spikes_indices[spike_index]]+time_points)]
        template = templates[spike_templates[used_spikes_indices[spike_index]], :, :].squeeze()
        relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)
        # Per-channel trough depth relative to each channel's median.
        spike_raw_data_median_over_time = np.median(spike_raw_data, axis=1)
        peaks_to_median = spike_raw_data_median_over_time - spike_raw_data.min(axis=1)
        peaks_to_median = peaks_to_median[relevant_channels]
        # Channels sorted by descending trough depth; the deepest anchors the position.
        relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]
        peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)
        # Appended median acts as the zero-reference for the weight normalization,
        # then is dropped again with the [:-1] slice.
        peaks_to_median_sorted.append(np.median(spike_raw_data_median_over_time[relevant_channels]))
        weights = _normalize(peaks_to_median_sorted)[:-1]
        relevant_channels_positions = channel_positions[relevant_channels_sorted]
        pos_x = relevant_channels_positions[0, 0]
        pos_y = relevant_channels_positions[0, 1]
        # Shift the anchor channel's position by the weighted mean offset of the others.
        new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])
        new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])
        weighted_average_postions[spike_index, :] = [new_pos_x, new_pos_y]
        spike_distance_on_probe[spike_index] = np.sqrt(np.power(new_pos_x, 2) + np.power(new_pos_y, 2))
        counter += 1
        if counter % 5000 == 0:
            print('Completed ' + str(counter) + ' spikes')
    weighted_average_postions = weighted_average_postions * position_mult

    # sort according to position on probe
    spike_indices_sorted_by_probe_distance = np.array([b[0] for b in sorted(enumerate(spike_distance_on_probe),
                                                                            key=lambda dist: dist[1])])
    spike_distances_on_probe_sorted = np.array([b[1] for b in sorted(enumerate(spike_distance_on_probe),
                                                                     key=lambda dist: dist[1])])
    np.save(os.path.join(base_folder, ct.WEIGHTED_SPIKE_POSITIONS_FILENAME), weighted_average_postions)
    return weighted_average_postions, spike_distance_on_probe, \
        spike_indices_sorted_by_probe_distance, spike_distances_on_probe_sorted
def generate_probe_positions_of_templates(base_folder, threshold=0.1, new_templates_array=None):
    """
    Generate positions (x, y coordinates) for each template found by kilosort on the probe or passed to it by the
    new_templates_array.
    This function assumes that the base_folder holds all the necessary .npy arrays.
    If no new_templates_array is passed it will look for the templates.npy file (created by kilosort) which is the
    average of all spikes for each template (so a (templates x time x channels) data cube). It will also try to find the
    file template_marking.npy which is produced after cleaning using the spikesort_tsne_guis.clean_kilosort_templates
    GUI. If this is found only the non noise templates will have their position evaluated. If not found all templates
    will be considered.
    If a new_templates_array is passed (a data cube of either (templates x time x channels) or (templates x channels x time)
    dimensions) then this will be used to calculate the positions.
    In order for this function to find which channels are the most relevant in each template it looks into the
    template (a (channels x time) array). It then find the minimum points of all
    channels, takes their median and their standard deviation and for each channel creates the difference between the
    minimum and the median. Finally it demarcates the relevant to the template channels by keeping the ones whose
    difference is larger than a number of times (threshold) over the standard deviation.
    It then picks the relevant channels of the spike's raw data, finds the differences between the minimum value
    and the channel's time series median value (over time), orders the channels according to these differences and
    assigns weights between 0 and 1 (0 for a difference of 0, 1 for a maximum difference).
    It finally finds the x, y positions of the selected channels and adds to the position of the largest difference
    channel the weighted average positions of the remaining selected channels
    :param base_folder: the folder name into which the kilosort result .npy arrays are
    :type base_folder: string
    :param threshold: the number of times the standard deviation should be larger than the difference between a
    channel's minimum and the median of the minima of all channels in order to demarcate the channel as relevant to the
    spike
    :type threshold: float
    :param new_templates_array: an array that is the average over spikes of all templates
    :type new_templates_array: float32[templates x channels x time]
    :return: weighted_average_postions : the positions of the templates on the probe
    :rtype: weighted_average_postions : float32[len(used_spike_indices) x 2]
    """
    # Load the required data from the kilosort folder
    channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))
    if new_templates_array is None:
        try:
            templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))
        except FileNotFoundError:
            exit('No new_templates_array passed and no templates.npy found in folder')
        try:
            template_markings = np.load(os.path.join(base_folder, ct.TEMPLATE_MARKING_FILENAME))
        except FileNotFoundError:
            # No cleaning GUI output found: treat every template as usable.
            template_markings = np.ones((len(templates)))
        # Keep only the templates not marked as noise (marking > 0).
        templates = templates[template_markings > 0, :, :]
    else:
        # Normalize a (templates x channels x time) cube to (templates x time x channels).
        # NOTE(review): this assumes time > channels for a heuristic axis check — confirm for small probes.
        if new_templates_array.shape[1] > new_templates_array.shape[2]:
            templates = np.reshape(new_templates_array, (new_templates_array.shape[0],
                                                         new_templates_array.shape[2],
                                                         new_templates_array.shape[1]))
        else:
            templates = new_templates_array
    # Run the loop over all templates to get the positions
    counter = 0
    templates_positions = []
    for template in templates:
        relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)
        # Per-channel distance between the channel's median over time and its minimum (peak depth).
        template_median_over_time = np.median(template, axis=0)
        peaks_to_median = template_median_over_time - template.min(axis=0)
        peaks_to_median = peaks_to_median[relevant_channels]
        # Order channels by decreasing peak depth; ties break on channel index (descending).
        relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]
        peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)
        # Append the median as a floor value so _normalize maps it to the smallest weight; dropped below.
        peaks_to_median_sorted.append(np.median(template_median_over_time[relevant_channels]))
        weights = _normalize(peaks_to_median_sorted)[:-1]
        relevant_channels_positions = channel_positions[relevant_channels_sorted]
        # Start at the deepest channel and shift by the weighted mean offset of the remaining channels.
        pos_x = relevant_channels_positions[0, 0]
        pos_y = relevant_channels_positions[0, 1]
        new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])
        new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])
        templates_positions.append([new_pos_x, new_pos_y])
        counter += 1
        if not (counter % 100):
            print('Completed ' + str(counter) + ' templates')
    templates_positions = np.array(templates_positions)
    np.save(os.path.join(base_folder, ct.WEIGHTED_TEMPLATE_POSITIONS_FILENAME), templates_positions)
    return np.array(templates_positions)
def get_y_spread_regions_of_bad_channel_groups(base_folder, bad_channel_groups):
    """Return the [bottom, top] y-extent on the probe of each bad-channel group.

    :param base_folder: folder containing channel_positions.npy
    :param bad_channel_groups: iterable of channel-index groups
    :return: list of [bottom, top] y coordinates, one per group
    """
    channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))
    y_spreads = []
    for group in bad_channel_groups:
        group_ys = channel_positions[group][:, 1]
        y_spreads.append([group_ys.min(), group_ys.max()])
    return y_spreads
def view_spike_positions(spike_positions, brain_regions, probe_dimensions, labels_offset=80, font_size=20):
    """
    Plot the spike positions as a scatter plot on a probe marked with brain regions
    :param spike_positions: the x,y positions of the spikes
    :type spike_positions: (np.array((N,2)))
    :param brain_regions: a dictionary with keys the names of the brain regions underneath the demarcating lines and
    values the y position on the probe of the demarcating lines
    :type brain_regions: dict
    :param probe_dimensions: the x and y limits of the probe
    :type probe_dimensions: (np.array(2))
    """
    figure = plt.figure()
    axes = figure.add_axes([0.08, 0.05, 0.9, 0.9])
    axes.scatter(spike_positions[:, 0], spike_positions[:, 1], s=5)
    axes.set_xlim(0, probe_dimensions[0])
    axes.set_ylim(0, probe_dimensions[1])
    # Tick every 100 units along the probe length, drawn inward in blue.
    axes.yaxis.set_ticks(np.arange(0, probe_dimensions[1], 100))
    axes.tick_params(axis='y', direction='in', length=5, width=1, colors='b')
    # Draw one dashed demarcation line per region, labelled just below it.
    for region_name in brain_regions:
        line_y = brain_regions[region_name]
        axes.text(2, line_y - labels_offset, region_name, fontsize=font_size)
        axes.plot([0, probe_dimensions[0]], [line_y, line_y], 'k--', linewidth=2)
    return figure, axes
def view_grouped_templates_positions(base_folder, brain_regions, probe_dimensions, position_multiplier=1,
                                     bad_channel_regions=None, template_info=None, labels_offset=80,
                                     font_size=20, dot_sizes=None,
                                     func_to_run_on_click=None, args_of_func=None):
    """
    Scatter-plot template positions on the probe, colored/sized by template type.

    :param base_folder: the folder where all the npy arrays (template_markings etc.) are saved
    :type base_folder: string
    :param brain_regions: a dictionary with keys the names of the brain regions underneath the demarcating lines and
    values the y position on the probe of the demarcating lines
    :type brain_regions: (dict{string: float})
    :param probe_dimensions: the dimensions of the probe
    :type probe_dimensions: np.array(2)
    :param position_multiplier: a number multiplying the positions so that the numbers are not the arbitrary ones from
    the prb file but correspond to the length of the probe
    :type position_multiplier: float
    :param template_info: If provided the template_info will be used to define the types of the templates. It assumes
    the length of the template_info and of the loaded weighted_template_positions array is the same unless the
    template_info has template positions (position X and position Y column) in it. In this case these are used. Also
    the template_info is used to know which template is clicked on the figure for the on_pick event
    :type template_info: pd.Dataframe
    :param labels_offset: offset of the labels on the plot
    :type labels_offset: (int)
    :param font_size: the font size of the labels
    :type font_size: (int)
    :param func_to_run_on_click: The function to run on a click of a scatter point. It assumes that the first argument
    it needs is the template row of the template_info that was clicked.
    :type func_to_run_on_click: Func
    :param args_of_func: The arguments of the function to run on on_pick (after the template itself)
    :type args_of_func: list of objects
    :return: the matplotlib figure and axes
    """
    template_positions = np.squeeze(
        position_multiplier * np.load(os.path.join(base_folder, ct.TEMPLATE_POSITIONS_FILENAME)))
    if template_info is None:
        template_markings = np.load(os.path.join(base_folder, ct.TEMPLATE_MARKING_FILENAME))
        clean_template_markings = np.squeeze(template_markings[np.argwhere(template_markings > 0)])
    else:
        # Derive the markings from the template_info 'type' column.
        clean_template_markings = np.empty((len(template_info)))
        for t in ct.types:
            clean_template_markings[template_info['type'] == ct.types[t]] = t
        # Prefer positions stored in template_info when they are present.
        if ~np.isnan(template_info.iloc[0]['position X']):
            template_positions = template_info[['position X', 'position Y']].values * position_multiplier

    def on_pick(event):
        # Report (and optionally act on) the clicked scatter point.
        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
        ind = event.ind[0]
        x = template_positions[ind, 0]
        y = template_positions[ind, 1]
        print('________________________')
        print('x, y of mouse: {:.2f},{:.2f}'.format(xmouse, ymouse))
        print('Position: {}, {}'.format(str(x), str(y)))
        print('------------------------')
        if template_info is not None:
            template_number = template_info.iloc[ind]['template number']
            print('Template number = {}'.format(template_number))
            print('Firing frequency = {}'.format(template_info.iloc[ind]['firing rate']))
            print('Number of spikes = {}'.format(template_info.iloc[ind]['number of spikes']))
            print('________________________')
            if func_to_run_on_click is not None:
                if args_of_func is None:
                    func_to_run_on_click(template_info.iloc[ind])
                else:
                    func_to_run_on_click(template_info.iloc[ind], *args_of_func)

    types = np.flipud(np.unique(clean_template_markings))
    fig = plt.figure()
    ax = fig.add_axes([0.08, 0.05, 0.9, 0.9])
    fig.canvas.callbacks.connect('pick_event', on_pick)
    tolerance = 1
    type_to_color = {1: (0, 61/255, 1, 1), 2: (27/255, 221/255, 206/255, 1), 3: (99/255, 214/255, 39/255, 1),
                     4: (255/255, 183/255, 0/255, 1), 5: (100/255, 100/255, 100/255, 1),
                     6: (170 / 255, 170 / 255, 170 / 255, 1), 7: (240/255, 240/255, 240/255, 1)}
    type_to_size = {1: 60, 2: 50, 3: 40, 4: 40, 5: 40, 6: 40, 7: 40}
    colors = np.array(len(template_positions) * [(0, 0, 0, 1)]).astype(float)
    sizes = np.array(len(template_positions) * [40.0])
    for template_type in types:
        # FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the documented replacement.
        indices_of_templates_of_type = np.squeeze(np.argwhere(clean_template_markings == template_type)).astype(int)
        # A squeezed single-element result is 0-d, so assignment needs the scalar (not list-wrapped) color.
        if np.size(indices_of_templates_of_type) < 2:
            colors[indices_of_templates_of_type] = type_to_color[template_type]
        else:
            colors[indices_of_templates_of_type] = [type_to_color[template_type]]
        if dot_sizes is None:
            sizes[indices_of_templates_of_type] = type_to_size[template_type]
    if dot_sizes is not None:
        sizes = dot_sizes
    ax.scatter(template_positions[:, 0], template_positions[:, 1], s=sizes, c=colors,
               picker=tolerance)
    ax.set_xlim(0, probe_dimensions[0])
    ax.set_ylim(0, probe_dimensions[1])
    ax.yaxis.set_ticks(np.arange(0, probe_dimensions[1], 100))
    ax.tick_params(axis='y', direction='in', length=5, width=1, colors='b')
    if bad_channel_regions is not None:
        # Shade the y spans covered by bad channel groups.
        for bc_region in bad_channel_regions:
            bc_region = np.array(bc_region) * position_multiplier
            ax.add_patch(Rectangle((0, bc_region[0]),
                                   100, bc_region[1] - bc_region[0],
                                   facecolor="grey", alpha=0.5))
    for region in brain_regions:
        ax.text(2, brain_regions[region] - labels_offset, region, fontsize=font_size)
        ax.plot([0, probe_dimensions[0]], [brain_regions[region], brain_regions[region]], 'k--', linewidth=2)
    return fig, ax
|
14,319 | 66f6070b1cbf5a2715a175cc17eebb1aaf4c772e | import nltk
import codecs
from num2words import num2words
from nltk.stem.snowball import PortugueseStemmer
import string
import pickle
import numpy
import re
import json
from gensim.models import KeyedVectors
from nltk.stem import WordNetLemmatizer
import pandas as pd
import math
import wordnet
# Shared NLP resources, initialised once at import time.
_stemmer = PortugueseStemmer()  # Snowball stemmer for Portuguese
stopwords = nltk.corpus.stopwords.words('portuguese')  # Portuguese stopword list
word_tokenize = nltk.word_tokenize  # alias to NLTK's default tokenizer
punct = string.punctuation  # ASCII punctuation characters
"""
Author: Allan Barcelos
"""
def filter_stopwords(tokens):
    """Lower-case the tokens, dropping stopwords and punctuation marks."""
    kept = []
    for token in tokens:
        lowered = token.lower()
        # Stopword check uses the lowered form; punctuation check the raw token.
        if lowered not in stopwords and token not in punct:
            kept.append(lowered)
    return kept
def get_palavras_words(tree):
    """Extract the bracketed word annotations from a PALAVRAS parse-tree string."""
    # The tree arrives with literal backslash-n sequences; turn them into real newlines
    # so '.' (which does not cross newlines) matches per line.
    normalized = tree.replace('\\n', '\n')
    return re.findall(r'\[(.*)\]', normalized)
def factor(col, codeDict):
    """Recode the values of a Series according to codeDict, on a copy.

    Call example: factor(data["Loan_Status"], {'N': 0, 'Y': 1})
    Replacements are applied one key at a time, so later keys see the results
    of earlier replacements.
    """
    recoded = pd.Series(col, copy=True)
    for original_value, new_value in codeDict.items():
        recoded.replace(original_value, new_value, inplace=True)
    return recoded
def replace_missing(x, to='desconhecido'):
    """Replace missing entries of x with `to`.

    For all-numeric sequences, float NaNs count as missing (ints are never
    NaN). For any other sequence, None counts as missing.

    FIX: the original had two *identical* `all(isinstance(y, int) ...)`
    branches — the second was clearly meant to cover floats, so NaN values
    in float lists were never replaced.
    """
    if all(isinstance(y, (int, float)) for y in x):
        # Only float values can be NaN; guard isnan so ints pass through untouched.
        return [to if isinstance(y, float) and math.isnan(y) else y for y in x]
    return [to if y is None else y for y in x]
def normalize(x):
    """Min-max scale the values of x into [0, 1].

    Raises ZeroDivisionError when all values are equal (unchanged behavior).
    FIX: min/max were recomputed inside the comprehension for every element,
    making the original O(n^2); they are hoisted here, giving O(n).
    """
    lowest = min(x)
    span = max(x) - lowest
    return [(y - lowest) / span for y in x]
def binning(col, cut_points, labels=None):
    """Bin a Series into intervals bounded by [min(col), *cut_points, max(col)]."""
    # Bracket the user-supplied cut points with the data's own extremes.
    break_points = [col.min()] + cut_points + [col.max()]
    if not labels:
        # Default labels are the bin indices 0 .. len(cut_points).
        labels = range(len(cut_points) + 1)
    return pd.cut(col, bins=break_points, labels=labels, include_lowest=True)
def count_missing(x):
    """Count the null (NaN/None) entries of a pandas object."""
    return x.isnull().sum()
def stemming(x):
    """Return the Portuguese Snowball stem of the word x."""
    return _stemmer.stem(x)
def replace_word_synonyms(sentence1, sentence2, language, synonym_deep=2):
    """Rewrite sentence1 so its words align with sentence2 where possible.

    Every word of sentence1 that does not literally appear in sentence2 is
    replaced by the first of its WordNet synonyms that does occur in
    sentence2, or that matches one of sentence2's underscore-joined n-grams
    (the form WordNet uses for multi-word lemmas, e.g. 'ice_cream').

    :param sentence1: sentence to rewrite (string)
    :param sentence2: reference sentence (string)
    :param language: language code passed through to the wordnet helper
    :param synonym_deep: search depth for wordnet.find_synonyms
    :return: the rewritten sentence as a single space-joined string
    """
    sentence1 = sentence1.split()
    sentence2 = sentence2.split()
    # FIX: the original did '_'.join(x) on a *string* n-gram, which interleaves
    # an underscore between every character ("a b" -> "a_ _b"), so multi-word
    # WordNet lemmas could never match. Join the gram's tokens instead.
    sentence2_grams = ['_'.join(g.split()) for g in ngrams(sentence2, 3)]
    lt = []
    for t in sentence1:
        if t in sentence2:
            lt.append(t)
        else:
            ths = wordnet.find_synonyms(t, language, synonym_deep)
            ts = t
            for th in ths:
                if th in sentence2 or th in sentence2_grams:
                    ts = th
                    break
            lt.append(ts)
    return ' '.join(lt)
def replace_word_hyponym(sentence1, sentence2, language, hyponym_deep=2):
    """Rewrite sentence1 using WordNet hyponyms that occur in sentence2.

    Same scheme as replace_word_synonyms, but candidate replacements are
    hyponyms (more specific terms) of each unmatched word.

    :param sentence1: sentence to rewrite (string)
    :param sentence2: reference sentence (string)
    :param language: language code passed through to the wordnet helper
    :param hyponym_deep: search depth for wordnet.find_hyponyms
    :return: the rewritten sentence as a single space-joined string
    """
    sentence1 = sentence1.split()
    sentence2 = sentence2.split()
    # FIX: join the gram *tokens* with underscores (WordNet multi-word lemma
    # form); '_'.join on the string itself interleaved every character.
    sentence2_grams = ['_'.join(g.split()) for g in ngrams(sentence2, 3)]
    lt = []
    for t in sentence1:
        if t in sentence2:
            lt.append(t)
        else:
            ths = wordnet.find_hyponyms(t, language, hyponym_deep)
            ts = t
            for th in ths:
                if th in sentence2 or th in sentence2_grams:
                    ts = th
                    break
            lt.append(ts)
    return ' '.join(lt)
def replace_word_hypernyms(sentence1, sentence2, language, hyperonym_deep=2):
    """Rewrite sentence1 using WordNet hypernyms that occur in sentence2.

    Same scheme as replace_word_synonyms, but candidate replacements are
    hypernyms (more general terms) of each unmatched word.

    :param sentence1: sentence to rewrite (string)
    :param sentence2: reference sentence (string)
    :param language: language code passed through to the wordnet helper
    :param hyperonym_deep: search depth for wordnet.find_hypernyms
    :return: the rewritten sentence as a single space-joined string
    """
    sentence1 = sentence1.split()
    sentence2 = sentence2.split()
    # FIX: join the gram *tokens* with underscores (WordNet multi-word lemma
    # form); '_'.join on the string itself interleaved every character.
    sentence2_grams = ['_'.join(g.split()) for g in ngrams(sentence2, 3)]
    lt = []
    for t in sentence1:
        if t in sentence2:
            lt.append(t)
        else:
            ths = wordnet.find_hypernyms(t, language, hyperonym_deep)
            ts = t
            for th in ths:
                if th in sentence2 or th in sentence2_grams:
                    ts = th
                    break
            lt.append(ts)
    return ' '.join(lt)
def replace_word_hypernyms_of_synonyms(sentence1, sentence2, language, synonym_deep=2, hyperonym_deep=2):
    """Like replace_word_synonyms, but also considers hypernyms of each synonym.

    For every word of sentence1 not found literally in sentence2, the candidate
    set is its synonyms plus the hypernyms of those synonyms; the first
    candidate occurring in sentence2 (or in its underscore-joined n-grams) is
    substituted, with underscores converted back to spaces.

    :param sentence1: sentence to rewrite (string)
    :param sentence2: reference sentence (string)
    :param language: language code passed through to the wordnet helper
    :param synonym_deep: search depth for wordnet.find_synonyms
    :param hyperonym_deep: search depth for wordnet.find_hypernyms
    :return: the rewritten sentence as a single space-joined string
    """
    sentence1 = sentence1.split()
    sentence2 = sentence2.split()
    # FIX (shared with the sibling helpers): join the gram *tokens* with
    # underscores; '_'.join on a string interleaved every character.
    sentence2_grams = ['_'.join(g.split()) for g in ngrams(sentence2, 3)]
    lt = []
    for t in sentence1:
        if t in sentence2:
            lt.append(t)
            continue
        candidates = wordnet.find_synonyms(t, language, synonym_deep)
        # FIX: the original *appended* a nested list-of-lists of hypernyms, so
        # no hypernym could ever equal a plain string in sentence2. Flatten
        # them into the candidate list instead.
        for synonym in list(candidates):
            candidates.extend(wordnet.find_hypernyms(synonym, language, hyperonym_deep))
        changed_word = t
        for candidate in candidates:
            if candidate in sentence2 or candidate in sentence2_grams:
                changed_word = candidate.replace('_', ' ')
                break  # first match wins, consistent with the sibling helpers
        lt.append(changed_word)
    return ' '.join(lt)
def is_number_tryexcept(s):
    """Return True if s can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def number_to_word(word, language):
    """Spell out a numeric token as cardinal words in the given language.

    The local codes 'por'/'eng' are mapped to num2words locale codes.
    The token is returned unchanged when it cannot be converted.

    FIX: the original only caught NotImplementedError (unsupported locale);
    a non-numeric token made float(word) raise an uncaught ValueError even
    though the fall-back-to-original-word intent is clear.
    """
    if language == 'por':
        language = 'pt_BR'
    elif language == 'eng':
        language = 'en'
    try:
        return num2words(float(word), to='cardinal', lang=language)
    except (ValueError, NotImplementedError):
        # ValueError: token is not numeric; NotImplementedError: locale unsupported.
        return word
# Retorna os ngramas variando de 1 até N
def ngrams(s, max_n):
if isinstance(s, list) == False:
s = s.split()
grams = list()
n = 1
while n <= max_n:
k = nltk.ngrams(s, n)
n = n + 1
grams.extend([' '.join(i) for i in k])
return grams |
14,320 | d6f44df6cd7cf8adc02bb50f92e03b57b84f3ed9 | #sum of fibnocci sequence in a given range using for loop
def fibnocci_sum(n):
    """Sum the first n Fibonacci numbers (0, 1, 1, 2, ...) iteratively."""
    prev, curr = 0, 1
    # The first two terms (0 + 1) are counted up front.
    total = prev + curr
    for _ in range(2, n):
        prev, curr = curr, prev + curr
        total += curr
    return total
# Script entry: read the term count from stdin and print the Fibonacci sum.
n=int(input())
print(fibnocci_sum(n))
#sum of fibnocci sequence in a given range using while loop
def fibnocci_sum(n):
    """Sum the first n Fibonacci numbers (0, 1, 1, 2, ...)."""
    a, b = 0, 1
    # The first two terms (0 + 1) are counted up front; the remaining
    # n - 2 terms are accumulated below.
    total = a + b
    for _ in range(2, n):
        total += a + b
        a, b = b, a + b
    return total
# Script entry: read the term count from stdin and print the Fibonacci sum.
n=int(input())
print(fibnocci_sum(n))
|
14,321 | 716909da20865f8f42ecd388262c941ebda84a9c | """Derived / analysis assets that aren't simple to construct.
This is really too large & generic of a category. Should we have an asset group for each
set of related analyses? E.g.
* mcoe_assets
* service_territory_assets
* heat_rate_assets
* state_demand_assets
* depreciation_assets
* plant_parts_eia_assets
* ferc1_eia_record_linkage_assets
Not sure what the right organization is but they'll be defined across a bunch of
different modules. Eventually I imagine these would just be the novel derived values,
probably in pretty skinny tables, which get joined / aggregated with other data in the
denormalized tables.
"""
import pandas as pd
from dagster import asset
import pudl
logger = pudl.logging_helpers.get_logger(__name__)
@asset(io_manager_key="pudl_sqlite_io_manager", compute_kind="Python")
def utility_analysis(utils_eia860: pd.DataFrame) -> pd.DataFrame:
    """Example of how to create an analysis table that depends on an output view.

    This final dataframe will be written to the database (without a schema).
    """
    # Do some analysis on utils_eia860
    # Placeholder: currently a pass-through of the input view; real analysis
    # logic would transform utils_eia860 before it is written out.
    return utils_eia860
|
14,322 | a6f0e9220b7fc6b428d0dd58d39c7748b9926bb1 | from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import date
import hashlib
from dateutil.relativedelta import relativedelta
from corehq.apps.locations.models import SQLLocation
from corehq.apps.userreports.models import StaticDataSourceConfiguration, get_datasource_config
from corehq.apps.userreports.util import get_table_name
from custom.icds_reports.const import (
AGG_COMP_FEEDING_TABLE,
AGG_CCS_RECORD_PNC_TABLE,
AGG_CHILD_HEALTH_PNC_TABLE,
AGG_CHILD_HEALTH_THR_TABLE,
AGG_GROWTH_MONITORING_TABLE,
DASHBOARD_DOMAIN
)
from six.moves import range
def transform_day_to_month(day):
    """Return `day` snapped to the first day of its month (works for date or datetime)."""
    return day.replace(day=1)
def month_formatter(day):
    """Return the first of `day`'s month formatted as 'YYYY-MM-DD'."""
    return transform_day_to_month(day).strftime('%Y-%m-%d')
class BaseICDSAggregationHelper(object):
    """Defines an interface for aggregating data from UCRs to specific tables
    for the dashboard.

    All aggregate tables are partitioned by state and month

    Attributes:
        ucr_data_source_id - The UCR data source that contains the raw data to aggregate
        aggregate_parent_table - The parent table defined in models.py that will contain aggregate data
        aggregate_child_table_prefix - The prefix for tables that inherit from the parent table
    """
    ucr_data_source_id = None
    aggregate_parent_table = None
    aggregate_child_table_prefix = None
    child_health_monthly_ucr_id = 'static-child_cases_monthly_tableau_v2'
    ccs_record_monthly_ucr_id = 'static-ccs_record_cases_monthly_tableau_v2'

    def __init__(self, state_id, month):
        self.state_id = state_id
        # Aggregation is per calendar month; normalize to the first of the month.
        self.month = transform_day_to_month(month)

    @property
    def domain(self):
        # Currently its only possible for one domain to have access to the ICDS dashboard per env
        return DASHBOARD_DOMAIN

    @property
    def ucr_tablename(self):
        """Name of the UCR table holding the raw data this helper aggregates."""
        doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, self.ucr_data_source_id)
        config, _ = get_datasource_config(doc_id, self.domain)
        return get_table_name(self.domain, config.table_id)

    def generate_child_tablename(self, month=None):
        """Return the child-partition table name for (state_id, month).

        The name embeds a truncated md5 of state id + month so it stays short
        and unique per partition.
        """
        month = month or self.month
        month_string = month_formatter(month)
        # FIX: md5() requires bytes on Python 3 (and this module is written for
        # py2+py3 via six/__future__); encoding as UTF-8 keeps the digest
        # identical for the ASCII ids used here.
        hash_for_table = hashlib.md5((self.state_id + month_string).encode('utf-8')).hexdigest()[8:]
        return self.aggregate_child_table_prefix + hash_for_table

    def create_table_query(self, month=None):
        """Return (SQL, params) creating this state/month's child partition.

        The child inherits the parent table and carries a CHECK constraint so
        PostgreSQL can route/prune queries by state and month.
        """
        month = month or self.month
        month_string = month_formatter(month)
        tablename = self.generate_child_tablename(month)

        return """
        CREATE TABLE IF NOT EXISTS "{child_tablename}" (
            CHECK (month = %(month_string)s AND state_id = %(state_id)s),
            LIKE "{parent_tablename}" INCLUDING DEFAULTS INCLUDING CONSTRAINTS INCLUDING INDEXES
        ) INHERITS ("{parent_tablename}")
        """.format(
            parent_tablename=self.aggregate_parent_table,
            child_tablename=tablename,
        ), {
            "month_string": month_string,
            "state_id": self.state_id
        }

    def drop_table_query(self):
        """Return SQL dropping this state/month's child partition."""
        tablename = self.generate_child_tablename(self.month)
        return 'DROP TABLE IF EXISTS "{tablename}"'.format(tablename=tablename)

    def data_from_ucr_query(self):
        """Returns (SQL query, query parameters) from the UCR data table that
        puts data in the form expected by the aggregate table
        """
        raise NotImplementedError

    def aggregation_query(self):
        """Returns (SQL query, query parameters) that will aggregate from a UCR
        source to an aggregate table.

        NOTE: subclasses implement ``aggregation_query``; this abstract hook is
        declared so the base class matches them. The historical
        ``aggregate_query`` name is kept below for backward compatibility.
        """
        raise NotImplementedError

    def aggregate_query(self):
        """Deprecated alias of :meth:`aggregation_query`, kept for compatibility."""
        raise NotImplementedError

    def compare_with_old_data_query(self):
        """Used for backend migrations from one data source to another. Returns
        (SQL query, query parameters) that will return any rows that are
        inconsistent from the old data to the new.
        """
        raise NotImplementedError
class ComplementaryFormsAggregationHelper(BaseICDSAggregationHelper):
    """Aggregates complementary-feeding form data per child_health case and month."""
    ucr_data_source_id = 'static-complementary_feeding_forms'
    aggregate_parent_table = AGG_COMP_FEEDING_TABLE
    aggregate_child_table_prefix = 'icds_db_child_cf_form_'

    @property
    def _old_ucr_tablename(self):
        """Table name of the legacy child-health monthly UCR used for comparison."""
        doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, self.child_health_monthly_ucr_id)
        config, _ = get_datasource_config(doc_id, self.domain)
        return get_table_name(self.domain, config.table_id)

    def data_from_ucr_query(self):
        """Return (SQL, params) collapsing this month's CF forms to one row per case.

        Window functions over all of a case's forms in the month:
        MAX(...) captures "ever happened" flags, LAST_VALUE(...) the state
        reported on the case's most recent form.
        """
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))

        return """
        SELECT DISTINCT child_health_case_id AS case_id,
        LAST_VALUE(timeend) OVER w AS latest_time_end,
        MAX(play_comp_feeding_vid) OVER w AS play_comp_feeding_vid,
        MAX(comp_feeding) OVER w AS comp_feeding_ever,
        MAX(demo_comp_feeding) OVER w AS demo_comp_feeding,
        MAX(counselled_pediatric_ifa) OVER w AS counselled_pediatric_ifa,
        LAST_VALUE(comp_feeding) OVER w AS comp_feeding_latest,
        LAST_VALUE(diet_diversity) OVER w AS diet_diversity,
        LAST_VALUE(diet_quantity) OVER w AS diet_quantity,
        LAST_VALUE(hand_wash) OVER w AS hand_wash
        FROM "{ucr_tablename}"
        WHERE timeend >= %(current_month_start)s AND timeend < %(next_month_start)s AND state_id = %(state_id)s
        WINDOW w AS (
            PARTITION BY child_health_case_id
            ORDER BY timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
        )
        """.format(ucr_tablename=self.ucr_tablename), {
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
            "state_id": self.state_id
        }

    def aggregation_query(self):
        """Return (SQL, params) merging this month's UCR data with last month's rollup.

        A FULL OUTER JOIN against the previous month's partition carries
        forward cases with no form this month.
        """
        month = self.month.replace(day=1)
        tablename = self.generate_child_tablename(month)
        previous_month_tablename = self.generate_child_tablename(month - relativedelta(months=1))

        ucr_query, ucr_query_params = self.data_from_ucr_query()
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id
        }
        query_params.update(ucr_query_params)

        # GREATEST calculations are for when we want to know if a thing has
        # ever happened to a case.
        # CASE WHEN calculations are for when we want to know if a case
        # happened during the last form for this case. We must use CASE WHEN
        # and not COALESCE as when questions are skipped they will be NULL
        # and we want NULL in the aggregate table
        return """
        INSERT INTO "{tablename}" (
            state_id, month, case_id, latest_time_end_processed, comp_feeding_ever,
            demo_comp_feeding, counselled_pediatric_ifa, play_comp_feeding_vid,
            comp_feeding_latest, diet_diversity, diet_quantity, hand_wash
        ) (
            SELECT
                %(state_id)s AS state_id,
                %(month)s AS month,
                COALESCE(ucr.case_id, prev_month.case_id) AS case_id,
                GREATEST(ucr.latest_time_end, prev_month.latest_time_end_processed) AS latest_time_end_processed,
                GREATEST(ucr.comp_feeding_ever, prev_month.comp_feeding_ever) AS comp_feeding_ever,
                GREATEST(ucr.demo_comp_feeding, prev_month.demo_comp_feeding) AS demo_comp_feeding,
                GREATEST(ucr.counselled_pediatric_ifa, prev_month.counselled_pediatric_ifa) AS counselled_pediatric_ifa,
                GREATEST(ucr.play_comp_feeding_vid, prev_month.play_comp_feeding_vid) AS play_comp_feeding_vid,
                CASE WHEN ucr.latest_time_end IS NOT NULL
                     THEN ucr.comp_feeding_latest ELSE prev_month.comp_feeding_latest
                END AS comp_feeding_latest,
                CASE WHEN ucr.latest_time_end IS NOT NULL
                     THEN ucr.diet_diversity ELSE prev_month.diet_diversity
                END AS diet_diversity,
                CASE WHEN ucr.latest_time_end IS NOT NULL
                     THEN ucr.diet_quantity ELSE prev_month.diet_quantity
                END AS diet_quantity,
                CASE WHEN ucr.latest_time_end IS NOT NULL
                     THEN ucr.hand_wash ELSE prev_month.hand_wash
                END AS hand_wash
            FROM ({ucr_table_query}) ucr
            FULL OUTER JOIN "{previous_month_tablename}" prev_month
            ON ucr.case_id = prev_month.case_id
        )
        """.format(
            ucr_table_query=ucr_query,
            previous_month_tablename=previous_month_tablename,
            tablename=tablename
        ), query_params

    def compare_with_old_data_query(self):
        """Compares data from the complementary feeding forms aggregate table
        to the the old child health monthly UCR table that current aggregate
        script uses
        """
        month = self.month.replace(day=1)
        return """
        SELECT agg.case_id
        FROM "{child_health_monthly_ucr}" chm_ucr
        FULL OUTER JOIN "{new_agg_table}" agg
        ON chm_ucr.doc_id = agg.case_id AND chm_ucr.month = agg.month AND agg.state_id = chm_ucr.state_id
        WHERE chm_ucr.month = %(month)s and agg.state_id = %(state_id)s AND (
              (chm_ucr.cf_eligible = 1 AND (
                  chm_ucr.cf_in_month != agg.comp_feeding_latest OR
                  chm_ucr.cf_diet_diversity != agg.diet_diversity OR
                  chm_ucr.cf_diet_quantity != agg.diet_quantity OR
                  chm_ucr.cf_handwashing != agg.hand_wash OR
                  chm_ucr.cf_demo != agg.demo_comp_feeding OR
                  chm_ucr.counsel_pediatric_ifa != agg.counselled_pediatric_ifa OR
                  chm_ucr.counsel_comp_feeding_vid != agg.play_comp_feeding_vid
              )) OR (chm_ucr.cf_initiation_eligible = 1 AND chm_ucr.cf_initiated != agg.comp_feeding_ever)
        )
        """.format(
            child_health_monthly_ucr=self._old_ucr_tablename,
            new_agg_table=self.aggregate_parent_table,
        ), {
            "month": month.strftime('%Y-%m-%d'),
            "state_id": self.state_id
        }
class PostnatalCareFormsChildHealthAggregationHelper(BaseICDSAggregationHelper):
    """Aggregates postnatal-care form data per child_health case and month."""
    ucr_data_source_id = 'static-postnatal_care_forms'
    aggregate_parent_table = AGG_CHILD_HEALTH_PNC_TABLE
    aggregate_child_table_prefix = 'icds_db_child_pnc_form_'

    @property
    def _old_ucr_tablename(self):
        """Table name of the legacy child-health monthly UCR used for comparison."""
        doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, self.child_health_monthly_ucr_id)
        config, _ = get_datasource_config(doc_id, self.domain)
        return get_table_name(self.domain, config.table_id)

    def data_from_ucr_query(self):
        """Return (SQL, params) collapsing this month's PNC forms to one row per case.

        MAX(...) window aggregates capture "ever happened" flags;
        LAST_VALUE(...) captures the state on the case's most recent form.
        """
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))

        return """
        SELECT DISTINCT child_health_case_id AS case_id,
        LAST_VALUE(timeend) OVER w AS latest_time_end,
        MAX(counsel_increase_food_bf) OVER w AS counsel_increase_food_bf,
        MAX(counsel_breast) OVER w AS counsel_breast,
        MAX(skin_to_skin) OVER w AS skin_to_skin,
        LAST_VALUE(is_ebf) OVER w AS is_ebf,
        LAST_VALUE(water_or_milk) OVER w AS water_or_milk,
        LAST_VALUE(other_milk_to_child) OVER w AS other_milk_to_child,
        LAST_VALUE(tea_other) OVER w AS tea_other,
        LAST_VALUE(eating) OVER w AS eating,
        MAX(counsel_exclusive_bf) OVER w AS counsel_exclusive_bf,
        MAX(counsel_only_milk) OVER w AS counsel_only_milk,
        MAX(counsel_adequate_bf) OVER w AS counsel_adequate_bf,
        LAST_VALUE(not_breastfeeding) OVER w AS not_breastfeeding
        FROM "{ucr_tablename}"
        WHERE timeend >= %(current_month_start)s AND
              timeend < %(next_month_start)s AND
              state_id = %(state_id)s AND
              child_health_case_id IS NOT NULL
        WINDOW w AS (
            PARTITION BY child_health_case_id
            ORDER BY timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
        )
        """.format(ucr_tablename=self.ucr_tablename), {
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
            "state_id": self.state_id
        }

    def aggregation_query(self):
        """Return (SQL, params) merging this month's UCR data with last month's rollup.

        GREATEST carries "ever happened" flags forward across months; the
        latest-form columns are taken from this month's UCR data only.
        """
        month = self.month.replace(day=1)
        tablename = self.generate_child_tablename(month)
        previous_month_tablename = self.generate_child_tablename(month - relativedelta(months=1))

        ucr_query, ucr_query_params = self.data_from_ucr_query()
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id
        }
        query_params.update(ucr_query_params)

        return """
        INSERT INTO "{tablename}" (
            state_id, month, case_id, latest_time_end_processed, counsel_increase_food_bf,
            counsel_breast, skin_to_skin, is_ebf, water_or_milk, other_milk_to_child,
            tea_other, eating, counsel_exclusive_bf, counsel_only_milk, counsel_adequate_bf,
            not_breastfeeding
        ) (
            SELECT
                %(state_id)s AS state_id,
                %(month)s AS month,
                COALESCE(ucr.case_id, prev_month.case_id) AS case_id,
                GREATEST(ucr.latest_time_end, prev_month.latest_time_end_processed) AS latest_time_end_processed,
                GREATEST(ucr.counsel_increase_food_bf, prev_month.counsel_increase_food_bf) AS counsel_increase_food_bf,
                GREATEST(ucr.counsel_breast, prev_month.counsel_breast) AS counsel_breast,
                GREATEST(ucr.skin_to_skin, prev_month.skin_to_skin) AS skin_to_skin,
                ucr.is_ebf AS is_ebf,
                ucr.water_or_milk AS water_or_milk,
                ucr.other_milk_to_child AS other_milk_to_child,
                ucr.tea_other AS tea_other,
                ucr.eating AS eating,
                GREATEST(ucr.counsel_exclusive_bf, prev_month.counsel_exclusive_bf) AS counsel_exclusive_bf,
                GREATEST(ucr.counsel_only_milk, prev_month.counsel_only_milk) AS counsel_only_milk,
                GREATEST(ucr.counsel_adequate_bf, prev_month.counsel_adequate_bf) AS counsel_adequate_bf,
                ucr.not_breastfeeding AS not_breastfeeding
            FROM ({ucr_table_query}) ucr
            FULL OUTER JOIN "{previous_month_tablename}" prev_month
            ON ucr.case_id = prev_month.case_id
        )
        """.format(
            ucr_table_query=ucr_query,
            previous_month_tablename=previous_month_tablename,
            tablename=tablename
        ), query_params

    def compare_with_old_data_query(self):
        """Compares data from the postnatal care forms aggregate table
        to the old child health monthly UCR table that the current aggregate
        script uses.

        FIX: COALESCE(agg.counsel_increase_food_bf) was missing its ", 0"
        default (unlike the sibling line below), so a NULL aggregate value
        made the comparison NULL and silently hid discrepancies.
        """
        month = self.month.replace(day=1)
        return """
        SELECT agg.case_id
        FROM "{child_health_monthly_ucr}" chm_ucr
        FULL OUTER JOIN "{new_agg_table}" agg
        ON chm_ucr.doc_id = agg.case_id AND chm_ucr.month = agg.month AND agg.state_id = chm_ucr.state_id
        WHERE chm_ucr.month = %(month)s and agg.state_id = %(state_id)s AND (
              (chm_ucr.pnc_eligible = 1 AND (
                  chm_ucr.counsel_increase_food_bf != COALESCE(agg.counsel_increase_food_bf, 0) OR
                  chm_ucr.counsel_manage_breast_problems != COALESCE(agg.counsel_breast, 0)
              )) OR
              (chm_ucr.ebf_eligible = 1 AND (
                  chm_ucr.ebf_in_month != COALESCE(agg.is_ebf, 0) OR
                  chm_ucr.ebf_drinking_liquid != (
                      GREATEST(agg.water_or_milk, agg.other_milk_to_child, agg.tea_other, 0)
                  ) OR
                  chm_ucr.ebf_eating != COALESCE(agg.eating, 0) OR
                  chm_ucr.ebf_not_breastfeeding_reason != COALESCE(agg.not_breastfeeding, 'not_breastfeeding') OR
                  chm_ucr.counsel_ebf != GREATEST(agg.counsel_exclusive_bf, agg.counsel_only_milk, 0) OR
                  chm_ucr.counsel_adequate_bf != GREATEST(agg.counsel_adequate_bf, 0)
              ))
        )
        """.format(
            child_health_monthly_ucr=self._old_ucr_tablename,
            new_agg_table=self.aggregate_parent_table,
        ), {
            "month": month.strftime('%Y-%m-%d'),
            "state_id": self.state_id
        }
class PostnatalCareFormsCcsRecordAggregationHelper(BaseICDSAggregationHelper):
    """Aggregates postnatal-care form data per ccs_record case and month."""
    ucr_data_source_id = 'static-postnatal_care_forms'
    aggregate_parent_table = AGG_CCS_RECORD_PNC_TABLE
    aggregate_child_table_prefix = 'icds_db_ccs_pnc_form_'

    @property
    def _old_ucr_tablename(self):
        """Table name of the legacy ccs-record monthly UCR used for comparison."""
        doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, self.ccs_record_monthly_ucr_id)
        config, _ = get_datasource_config(doc_id, self.domain)
        return get_table_name(self.domain, config.table_id)

    def data_from_ucr_query(self):
        """Return (SQL, params) collapsing this month's PNC forms to one row per case."""
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))

        return """
        SELECT DISTINCT ccs_record_case_id AS case_id,
        LAST_VALUE(timeend) OVER w AS latest_time_end,
        MAX(counsel_methods) OVER w AS counsel_methods
        FROM "{ucr_tablename}"
        WHERE timeend >= %(current_month_start)s AND timeend < %(next_month_start)s AND state_id = %(state_id)s
        WINDOW w AS (
            PARTITION BY ccs_record_case_id
            ORDER BY timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
        )
        """.format(ucr_tablename=self.ucr_tablename), {
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
            "state_id": self.state_id
        }

    def aggregation_query(self):
        """Return (SQL, params) merging this month's UCR data with last month's rollup."""
        month = self.month.replace(day=1)
        tablename = self.generate_child_tablename(month)
        previous_month_tablename = self.generate_child_tablename(month - relativedelta(months=1))

        ucr_query, ucr_query_params = self.data_from_ucr_query()
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id
        }
        query_params.update(ucr_query_params)

        return """
        INSERT INTO "{tablename}" (
            state_id, month, case_id, latest_time_end_processed, counsel_methods
        ) (
            SELECT
                %(state_id)s AS state_id,
                %(month)s AS month,
                COALESCE(ucr.case_id, prev_month.case_id) AS case_id,
                GREATEST(ucr.latest_time_end, prev_month.latest_time_end_processed) AS latest_time_end_processed,
                GREATEST(ucr.counsel_methods, prev_month.counsel_methods) AS counsel_methods
            FROM ({ucr_table_query}) ucr
            FULL OUTER JOIN "{previous_month_tablename}" prev_month
            ON ucr.case_id = prev_month.case_id
        )
        """.format(
            ucr_table_query=ucr_query,
            previous_month_tablename=previous_month_tablename,
            tablename=tablename
        ), query_params

    def compare_with_old_data_query(self):
        """Compares data from the postnatal care forms aggregate table
        to the old ccs-record monthly UCR table that the current aggregate
        script uses.

        FIX: the "next_month" parameter used relativedelta(month=1), which
        *sets* the month to January instead of adding one month; it must be
        relativedelta(months=1).
        """
        month = self.month.replace(day=1)
        return """
        SELECT agg.case_id
        FROM "{ccs_record_monthly_ucr}" crm_ucr
        FULL OUTER JOIN "{new_agg_table}" agg
        ON crm_ucr.doc_id = agg.case_id AND crm_ucr.month = agg.month AND agg.state_id = crm_ucr.state_id
        WHERE crm_ucr.month = %(month)s and agg.state_id = %(state_id)s AND (
              (crm_ucr.lactating = 1 OR crm_ucr.pregnant = 1) AND (
                crm_ucr.counsel_fp_methods != COALESCE(agg.counsel_methods, 0) OR
                (crm_ucr.pnc_visited_in_month = 1 AND
                 agg.latest_time_end_processed NOT BETWEEN %(month)s AND %(next_month)s)
              )
        )
        """.format(
            ccs_record_monthly_ucr=self._old_ucr_tablename,
            new_agg_table=self.aggregate_parent_table,
        ), {
            "month": month.strftime('%Y-%m-%d'),
            "next_month": (month + relativedelta(months=1)).strftime('%Y-%m-%d'),
            "state_id": self.state_id
        }
class THRFormsChildHealthAggregationHelper(BaseICDSAggregationHelper):
    """Aggregates Take-Home-Ration form data per child_health case per month."""
    ucr_data_source_id = 'static-dashboard_thr_forms'
    aggregate_parent_table = AGG_CHILD_HEALTH_THR_TABLE
    aggregate_child_table_prefix = 'icds_db_child_thr_form_'

    def aggregation_query(self):
        """Return (SQL, params) summing days_ration_given_child per case for the month.

        Unlike other helpers, this does not carry forward previous-month data:
        it aggregates only forms submitted within the month window.
        """
        month = self.month.replace(day=1)
        tablename = self.generate_child_tablename(month)
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id,
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
        }
        return """
        INSERT INTO "{tablename}" (
          state_id, month, case_id, latest_time_end_processed, days_ration_given_child
        ) (
          SELECT
            %(state_id)s AS state_id,
            %(month)s AS month,
            child_health_case_id AS case_id,
            MAX(timeend) AS latest_time_end_processed,
            SUM(days_ration_given_child) AS days_ration_given_child
          FROM "{ucr_tablename}"
          WHERE state_id = %(state_id)s AND
                timeend >= %(current_month_start)s AND timeend < %(next_month_start)s AND
                child_health_case_id IS NOT NULL
          GROUP BY child_health_case_id
        )
        """.format(
            ucr_tablename=self.ucr_tablename,
            tablename=tablename
        ), query_params
class GrowthMonitoringFormsAggregationHelper(BaseICDSAggregationHelper):
    """Aggregates growth-monitoring form data (weight, height, z-scores, MUAC)
    per child_health case per month, carrying forward prior-month values for
    measurements not re-recorded this month.
    """
    ucr_data_source_id = 'static-dashboard_growth_monitoring_forms'
    aggregate_parent_table = AGG_GROWTH_MONITORING_TABLE
    aggregate_child_table_prefix = 'icds_db_gm_form_'

    @property
    def _old_ucr_tablename(self):
        """Physical table name of the legacy child_health_monthly UCR for this domain."""
        doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, self.child_health_monthly_ucr_id)
        config, _ = get_datasource_config(doc_id, self.domain)
        return get_table_name(self.domain, config.table_id)

    def data_from_ucr_query(self):
        """Return (SQL, params) selecting, per case, the latest recorded value
        and its timestamp for each measurement within self.month.
        """
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))
        # We need many windows here because we want the last time changed for each of these columns
        # Window definitions inspired by https://stackoverflow.com/a/47223416
        # The CASE/WHEN's are needed, because time end should be NULL when a form has not changed the value,
        # but the windows include all forms (this works because we use LAST_VALUE and NULLs are sorted to the top
        return """
        SELECT
            DISTINCT child_health_case_id AS case_id,
            LAST_VALUE(weight_child) OVER weight_child AS weight_child,
            CASE
                WHEN LAST_VALUE(weight_child) OVER weight_child IS NULL THEN NULL
                ELSE LAST_VALUE(timeend) OVER weight_child
            END AS weight_child_last_recorded,
            LAST_VALUE(height_child) OVER height_child AS height_child,
            CASE
                WHEN LAST_VALUE(height_child) OVER height_child IS NULL THEN NULL
                ELSE LAST_VALUE(timeend) OVER height_child
            END AS height_child_last_recorded,
            CASE
                WHEN LAST_VALUE(zscore_grading_wfa) OVER zscore_grading_wfa = 0 THEN NULL
                ELSE LAST_VALUE(zscore_grading_wfa) OVER zscore_grading_wfa
            END AS zscore_grading_wfa,
            CASE
                WHEN LAST_VALUE(zscore_grading_wfa) OVER zscore_grading_wfa = 0 THEN NULL
                ELSE LAST_VALUE(timeend) OVER zscore_grading_wfa
            END AS zscore_grading_wfa_last_recorded,
            CASE
                WHEN LAST_VALUE(zscore_grading_hfa) OVER zscore_grading_hfa = 0 THEN NULL
                ELSE LAST_VALUE(zscore_grading_hfa) OVER zscore_grading_hfa
            END AS zscore_grading_hfa,
            CASE
                WHEN LAST_VALUE(zscore_grading_hfa) OVER zscore_grading_hfa = 0 THEN NULL
                ELSE LAST_VALUE(timeend) OVER zscore_grading_hfa
            END AS zscore_grading_hfa_last_recorded,
            CASE
                WHEN LAST_VALUE(zscore_grading_wfh) OVER zscore_grading_wfh = 0 THEN NULL
                ELSE LAST_VALUE(zscore_grading_wfh) OVER zscore_grading_wfh
            END AS zscore_grading_wfh,
            CASE
                WHEN LAST_VALUE(zscore_grading_wfh) OVER zscore_grading_wfh = 0 THEN NULL
                ELSE LAST_VALUE(timeend) OVER zscore_grading_wfh
            END AS zscore_grading_wfh_last_recorded,
            CASE
                WHEN LAST_VALUE(muac_grading) OVER muac_grading = 0 THEN NULL
                ELSE LAST_VALUE(muac_grading) OVER muac_grading
            END AS muac_grading,
            CASE
                WHEN LAST_VALUE(muac_grading) OVER muac_grading = 0 THEN NULL
                ELSE LAST_VALUE(timeend) OVER muac_grading
            END AS muac_grading_last_recorded
        FROM "{ucr_tablename}"
        WHERE timeend >= %(current_month_start)s AND timeend < %(next_month_start)s
            AND state_id = %(state_id)s AND child_health_case_id IS NOT NULL
        WINDOW
            weight_child AS (
                PARTITION BY child_health_case_id
                ORDER BY
                    CASE WHEN weight_child IS NULL THEN 0 ELSE 1 END ASC,
                    timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
            ),
            height_child AS (
                PARTITION BY child_health_case_id
                ORDER BY
                    CASE WHEN height_child IS NULL THEN 0 ELSE 1 END ASC,
                    timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
            ),
            zscore_grading_wfa AS (
                PARTITION BY child_health_case_id
                ORDER BY
                    CASE WHEN zscore_grading_wfa = 0 THEN 0 ELSE 1 END ASC,
                    timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
            ),
            zscore_grading_hfa AS (
                PARTITION BY child_health_case_id
                ORDER BY
                    CASE WHEN zscore_grading_hfa = 0 THEN 0 ELSE 1 END ASC,
                    timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
            ),
            zscore_grading_wfh AS (
                PARTITION BY child_health_case_id
                ORDER BY
                    CASE WHEN zscore_grading_wfh = 0 THEN 0 ELSE 1 END ASC,
                    timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
            ),
            muac_grading AS (
                PARTITION BY child_health_case_id
                ORDER BY
                    CASE WHEN muac_grading = 0 THEN 0 ELSE 1 END ASC,
                    timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
            )
        """.format(ucr_tablename=self.ucr_tablename), {
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
            "state_id": self.state_id
        }

    def aggregation_query(self):
        """Return (SQL, params) inserting this month's rows, merging new UCR data
        with last month's child table (values carry forward via COALESCE/GREATEST).
        """
        month = self.month.replace(day=1)
        tablename = self.generate_child_tablename(month)
        previous_month_tablename = self.generate_child_tablename(month - relativedelta(months=1))
        ucr_query, ucr_query_params = self.data_from_ucr_query()
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id
        }
        query_params.update(ucr_query_params)
        # The '1970-01-01' is a fallback, this should never happen,
        # but an unexpected NULL should not block other data
        return """
        INSERT INTO "{tablename}" (
          state_id, month, case_id, latest_time_end_processed,
          weight_child, weight_child_last_recorded,
          height_child, height_child_last_recorded,
          zscore_grading_wfa, zscore_grading_wfa_last_recorded,
          zscore_grading_hfa, zscore_grading_hfa_last_recorded,
          zscore_grading_wfh, zscore_grading_wfh_last_recorded,
          muac_grading, muac_grading_last_recorded
        ) (
          SELECT
            %(state_id)s AS state_id,
            %(month)s AS month,
            COALESCE(ucr.case_id, prev_month.case_id) AS case_id,
            GREATEST(
                ucr.weight_child_last_recorded,
                ucr.height_child_last_recorded,
                ucr.zscore_grading_wfa_last_recorded,
                ucr.zscore_grading_hfa_last_recorded,
                ucr.zscore_grading_wfh_last_recorded,
                ucr.muac_grading_last_recorded,
                prev_month.latest_time_end_processed,
                '1970-01-01'
            ) AS latest_time_end_processed,
            COALESCE(ucr.weight_child, prev_month.weight_child) AS weight_child,
            GREATEST(ucr.weight_child_last_recorded, prev_month.weight_child_last_recorded) AS weight_child_last_recorded,
            COALESCE(ucr.height_child, prev_month.height_child) AS height_child,
            GREATEST(ucr.height_child_last_recorded, prev_month.height_child_last_recorded) AS height_child_last_recorded,
            COALESCE(ucr.zscore_grading_wfa, prev_month.zscore_grading_wfa) AS zscore_grading_wfa,
            GREATEST(ucr.zscore_grading_wfa_last_recorded, prev_month.zscore_grading_wfa_last_recorded) AS zscore_grading_wfa_last_recorded,
            COALESCE(ucr.zscore_grading_hfa, prev_month.zscore_grading_hfa) AS zscore_grading_hfa,
            GREATEST(ucr.zscore_grading_hfa_last_recorded, prev_month.zscore_grading_hfa_last_recorded) AS zscore_grading_hfa_last_recorded,
            COALESCE(ucr.zscore_grading_wfh, prev_month.zscore_grading_wfh) AS zscore_grading_wfh,
            GREATEST(ucr.zscore_grading_wfh_last_recorded, prev_month.zscore_grading_wfh_last_recorded) AS zscore_grading_wfh_last_recorded,
            COALESCE(ucr.muac_grading, prev_month.muac_grading) AS muac_grading,
            GREATEST(ucr.muac_grading_last_recorded, prev_month.muac_grading_last_recorded) AS muac_grading_last_recorded
          FROM ({ucr_table_query}) ucr
          FULL OUTER JOIN "{previous_month_tablename}" prev_month
          ON ucr.case_id = prev_month.case_id
        )
        """.format(
            ucr_table_query=ucr_query,
            previous_month_tablename=previous_month_tablename,
            tablename=tablename
        ), query_params

    def compare_with_old_data_query(self):
        """Return (SQL, params) flagging cases where this aggregate disagrees
        with the legacy child_health_monthly UCR for self.month.
        """
        # only partially implements this comparison for now
        month = self.month.replace(day=1)
        return """
        SELECT agg.case_id
        FROM "{child_health_monthly_ucr}" chm_ucr
        FULL OUTER JOIN "{new_agg_table}" agg
        ON chm_ucr.doc_id = agg.case_id AND chm_ucr.month = agg.month AND agg.state_id = chm_ucr.state_id
        WHERE chm_ucr.month = %(month)s and agg.state_id = %(state_id)s AND
              (chm_ucr.wer_eligible = 1 AND (
                 (chm_ucr.nutrition_status_last_recorded = 'severely_underweight' AND agg.zscore_grading_wfa = 1) OR
                 (chm_ucr.nutrition_status_last_recorded = 'moderately_underweight' AND agg.zscore_grading_wfa = 2) OR
                 (chm_ucr.nutrition_status_last_recorded = 'normal' AND agg.zscore_grading_wfa IN (3,4)) OR
                 (chm_ucr.nutrition_status_last_recorded IS NULL AND agg.zscore_grading_wfa = 0) OR
                 (chm_ucr.weight_recorded_in_month = agg.weight_child AND agg.latest_time_end_processed BETWEEN %(month)s AND %(next_month)s)
              ))
        """.format(
            child_health_monthly_ucr=self._old_ucr_tablename,
            new_agg_table=self.aggregate_parent_table,
        ), {
            "month": month.strftime('%Y-%m-%d'),
            # Bug fix: relativedelta(month=1) *sets* the month to January;
            # months=1 advances by one month as intended.
            "next_month": (month + relativedelta(months=1)).strftime('%Y-%m-%d'),
            "state_id": self.state_id
        }
def recalculate_aggregate_table(model_class):
    """Re-run the monthly aggregation of *model_class* (a models.Model class,
    not an instance) for every ICDS state, covering Jan 2015 through the
    current month of 2018. Temporary backfill helper; not expected to be
    needed past 2018.
    """
    state_ids = (
        SQLLocation.objects
        .filter(domain='icds-cas', location_type__name='state')
        .values_list('id', flat=True)
    )
    for state_id in state_ids:
        # Full years 2015-2017, then 2018 up to (and including) the current month.
        months = [date(year, m, 1) for year in (2015, 2016, 2017) for m in range(1, 13)]
        months += [date(2018, m, 1) for m in range(1, date.today().month + 1)]
        for month in months:
            model_class.aggregate(state_id, month)
|
14,323 | c63d9281e8fd8b1e6b62974083010d809ac4b505 | import unittest
import os
import time
import requests as bare_requests
from utils.requests import RequestsWrapper
from utils.metrics import timer
from copy import deepcopy
from unittest.mock import patch
HTTPBIN_URL = os.getenv('HTTPBIN_URL', "http://localhost:8000")
requests = RequestsWrapper()
def test_wrapper(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return test_wrapper.__name__
return wrapper
class RequestsWrapperFunctionalTest(unittest.TestCase):
    """Functional tests for RequestsWrapper against a local httpbin instance.

    Requires httpbin reachable at HTTPBIN_URL (default http://localhost:8000).
    """

    def setUp(self):
        # Endpoint map for the httpbin test server.
        self.httpbin = dict(
            url=HTTPBIN_URL,
            endpoints=dict(
                delete="/delete",
                get="/get",
                patch="/patch",
                post="/post",
                put="/put"
            )
        )
        self.data = dict()
        self.kwargs = dict()

    def tearDown(self):
        self.data = {}

    def test_GET(self):
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["get"]
        )
        res = requests.get(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(200, res.status_code)

    def test_PATCH(self):
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["patch"]
        )
        res = requests.patch(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(200, res.status_code)

    def test_POST(self):
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["post"]
        )
        res = requests.post(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(200, res.status_code)

    def test_PUT(self):
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["put"]
        )
        res = requests.put(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(200, res.status_code)

    def test_DELETE(self):
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["delete"]
        )
        res = requests.delete(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(200, res.status_code)

    def test_no_attribute_exception_handling(self):
        # Exceptions raised by the underlying requests call must propagate unchanged.
        with patch.object(bare_requests, 'get') as requests_mock:
            requests_mock.side_effect = SyntaxError('Houston!')
            url = "{httpbin_url}{endpoint}".format(
                httpbin_url=self.httpbin["url"],
                endpoint=self.httpbin["endpoints"]["get"]
            )
            with self.assertRaises(SyntaxError):
                requests.get(
                    url,
                    data=self.data,
                    timeout=30,
                    verify=False,
                    **self.kwargs
                )

    def test_no_general_exception_handling(self):
        # An unknown HTTP verb should raise AttributeError, not be swallowed.
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["get"]
        )
        with self.assertRaises(AttributeError):
            requests.none(
                url,
                data=self.data,
                timeout=30,
                verify=False,
                **self.kwargs
            )

    def test_disabling_wrapping(self):
        # With a custom wrapper installed, the wrapper's value is returned;
        # after setting wrapper=None, the raw response comes back.
        requests.wrapper = test_wrapper
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["get"]
        )
        res = requests.get(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(test_wrapper.__name__, res)
        self.assertEqual(test_wrapper.__name__, requests.wrapper.__name__)
        # set wrapper to None
        requests.wrapper = None
        res = requests.get(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(None, requests.wrapper)
        self.assertNotEqual(test_wrapper.__name__, res)
        self.assertEqual(200, res.status_code)

    def test_bring_your_own_wrapper(self):
        requests.wrapper = test_wrapper
        url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["get"]
        )
        res = requests.get(
            url,
            data=self.data,
            timeout=30,
            verify=False,
            **self.kwargs
        )
        self.assertEqual(res, test_wrapper.__name__)
class RequestsWrapperPerformanceTest(unittest.TestCase):
    """Benchmarks RequestsWrapper overhead against bare `requests` calls.

    Requires httpbin reachable at HTTPBIN_URL.
    """

    def setUp(self):
        self.httpbin = dict(
            url=HTTPBIN_URL,
            endpoints=dict(
                delete="/delete",
                get="/get",
                patch="/patch",
                post="/post",
                put="/put"
            )
        )
        self.data = {}
        self.kwargs = dict(number=1000)
        self.url = "{httpbin_url}{endpoint}".format(
            httpbin_url=self.httpbin["url"],
            endpoint=self.httpbin["endpoints"]["get"]
        )

    @staticmethod
    def timed(fun, *args, **kwargs):
        """Call *fun* `number` times and return the average per-call time in ms."""
        number = 1000
        if 'number' in kwargs.keys():
            number = kwargs.pop('number')
        test_num = []
        for i in range(number):
            t0 = time.time()
            r = fun(*args, **kwargs)
            assert r.status_code == 200
            # Bug fix: the elapsed seconds were multiplied by `number` instead
            # of 1000, so the reported "ms" figure was only correct when
            # number happened to be 1000.
            time_taken = (time.time() - t0) * 1000
            test_num.append(time_taken)
        average_time = sum(test_num) / len(test_num)
        print('[timer]: func `{}` execution took {} ms.'.format(fun.__name__,
                                                                average_time))
        return average_time

    def bare_requests_timed(self, number=1):
        """Average per-call time (ms) for a plain requests.get."""
        import requests
        kwargs = deepcopy(self.kwargs)
        kwargs['number'] = number
        return self.timed(
            requests.get,
            self.url,
            data=self.data,
            timeout=30,
            verify=False,
            **kwargs
        )

    def wrapped_requests_timed(self, number=1, wrapper=timer('test')):
        """Average per-call time (ms) for a wrapped requests.get.

        NOTE(review): the `wrapper=timer('test')` default is evaluated once at
        definition time and shared between calls — confirm that is intended.
        """
        from utils.requests import RequestsWrapper
        requests = RequestsWrapper()
        requests.wrapper = wrapper
        kwargs = deepcopy(self.kwargs)
        kwargs['number'] = number
        return self.timed(
            requests.get,
            self.url,
            data=self.data,
            timeout=30,
            verify=False,
            **kwargs
        )

    def test_request_performance_vs_bare_requests(self):
        for number in [1, 10, 100, 500, 1000]:
            print("")
            print("RUNNING {0} TIMES".format(number))
            print("--------------------------------")
            print("1. bare requests")
            b_req_time = self.bare_requests_timed(number=number)
            print("* time = {0} ms".format(b_req_time))
            print("2. wrapped requests")
            h_req_time = self.wrapped_requests_timed(number=number)
            print("* time = {0} ms".format(h_req_time))
            time_diff = h_req_time - b_req_time
            print("time diff (wrapped requests time - bare requests time) "
                  "= {0} ms".format(time_diff))
            print("")
            # Assert time difference not greater than 1ms
            # TODO: Find a good way to determine best benchmark
            self.assertTrue(time_diff < 5)
|
14,324 | 8f1f1a04e60bc5c21a8bd6d15acd21d6b2c1a2b1 | from itertools import groupby
from operator import itemgetter
from django.forms import BaseInlineFormSet
from core.models import RepeatingScrumEntry, JournalEntryTemplate
class ScrumEntryInlineFormSet(BaseInlineFormSet):
    """Inline formset that pre-populates new instances with active repeating scrum entries."""

    def __init__(self, *args, **kwargs):
        # Only seed initial data for unsaved parent instances.
        if kwargs['instance'].pk is None:
            initial = kwargs.get('initial', [])
            initial += self.get_initial()
            kwargs['initial'] = initial
        super(ScrumEntryInlineFormSet, self).__init__(*args, **kwargs)

    def get_initial(self):
        """Return one initial dict per distinct title, collecting that title's tags.

        NOTE(review): itertools.groupby only groups *consecutive* rows —
        assumes active_qs() is ordered by title; confirm.
        """
        entries = RepeatingScrumEntry.active_qs().values('title', 'tags')
        initial = []
        for title, group in groupby(entries, itemgetter('title')):
            initial.append({
                'title': title,
                'tags': [row['tags'] for row in group]
            })
        return initial
class JournalEntryInlineFormSet(BaseInlineFormSet):
    """Inline formset that pre-populates new instances from journal entry templates."""

    def __init__(self, *args, **kwargs):
        # Only seed initial data for unsaved parent instances.
        if kwargs['instance'].pk is None:
            initial = kwargs.get('initial', [])
            initial += self.get_initial()
            kwargs['initial'] = initial
        super(JournalEntryInlineFormSet, self).__init__(*args, **kwargs)

    def get_initial(self):
        """Return one initial dict per distinct title with the first response
        and all tags for that title.

        NOTE(review): itertools.groupby only groups *consecutive* rows —
        assumes active_qs() is ordered by title; confirm.
        """
        entries = JournalEntryTemplate.active_qs().values('title', 'response', 'tags')
        initial = []
        for title, group in groupby(entries, itemgetter('title')):
            group = list(group)
            initial.append({
                'title': title,
                'response': group[0]['response'],
                'tags': [row['tags'] for row in group]
            })
        return initial
|
def string_bits(str):
    """Return every other character of *str*, starting with the first.

    Parameter is kept named `str` for backward compatibility even though it
    shadows the builtin. The manual index/while loop is replaced with the
    equivalent extended slice.
    """
    return str[::2]
print(string_bits('Hello') )
print(string_bits('Hi') )
print(string_bits('Heeololeo'))
|
14,326 | a11478d86fed5658c00a0cddaf146f3cd64dbfe4 | import sys,re,json,operator
from os import listdir
from os.path import isfile, join
# movie id -> {lowercased character-name token -> freebase id}
docs = {}
# freebase id -> lowercased actor name
actors = {}


def readMetadata(nameFile):
    """Populate the module-level `docs` and `actors` dicts from a tab-separated
    character-metadata file (one character per line; needs at least 13 columns:
    col 0 movie id, col 3 character name, col 10 freebase id, col 12 actor name).

    Each line is lowercased before parsing. If a movie id was seen before, only
    `actors` is updated (original behavior preserved).
    """
    global docs, actors
    # `with` guarantees the file is closed even on error (original leaked on exception).
    with open(nameFile) as metadata_file:
        for line in metadata_file:
            cols = line.rstrip().lower().split("\t")
            if len(cols) <= 12:
                continue
            movie_id, fbid = cols[0], cols[10]
            actors[fbid] = cols[12]
            if movie_id not in docs:
                # Index each whitespace-separated name token to the freebase id.
                docs[movie_id] = {part: fbid for part in cols[3].split(" ")}
def main(nameFile, outDirectory):
    """For every file in *nameFile* (a directory of coreproc-processed docs),
    resolve entity lines ("E...") to freebase character/actor ids using the
    global `docs`/`actors` indexes, and write the annotated copy to
    *outDirectory* under the same file name.
    """
    onlyfiles = [ f for f in listdir(nameFile) if isfile(join(nameFile,f)) ]
    for f in onlyfiles:
        outfile="%s/%s"% (outDirectory,f)
        out=open(outfile, "w" )
        key=f
        key=re.sub(".sent", "", f)  # NOTE(review): overwrites the previous assignment; first one is dead
        index=None
        dirf= "%s/%s" % (nameFile, f)
        file=open(dirf)
        for line in file:
            # locate the movie id in the DOC header, which must be before everything else
            if line.startswith("=== DOC"):
                cols=line.rstrip().split(" ")
                key=cols[2]
                if key in docs:
                    index=docs[key]
            # NOTE(review): if the DOC id is never found, `index` stays None and
            # the `n in index` test below would raise TypeError — confirm inputs.
            cols=line.rstrip().split("\t")
            if len(cols) > 1:
                id=cols[0]
                if id.startswith("E"):
                    info = json.loads(cols[1])
                    mdict=info['lemma_c']
                    # Try lemma counts from most to least frequent; first one that
                    # matches a name token in the movie's index wins.
                    sorted_mdict = sorted(iter(mdict.items()), key=operator.itemgetter(1), reverse=True)
                    count=0
                    best=""
                    for name,count in sorted_mdict:
                        n=name.lower()
                        count+=mdict[name]  # NOTE(review): doubles the count — looks unintended; verify
                        if n in index:
                            best=index[n]
                            break
                    if best == "" and count > 5:
                        # try harder
                        pass
                    #if count >= 2:
                    #	print "%s\t\t%s\t%s" % (best,count,mdict)
                    actor=""
                    if best in actors:
                        actor=actors[best]
                    info["fb"]=best
                    info["fba"]=actor
                    out.write(id + "\t" + json.dumps(info) + "\n")
                else:
                    out.write(line.rstrip() + "\n")
            else:
                out.write(line.rstrip() + "\n")
        file.close()
        out.close()
if __name__ == "__main__":
    #python ~/char_matcher.py metadata/all.character.metadata prc/ prcn/
    # prc = folder containing post coreproc.py processed docs
    # prcn = output directory, containing one file for each in prc/
    readMetadata(sys.argv[1])  # argv[1]: metadata file
    main(sys.argv[2], sys.argv[3])  # argv[2]: input dir, argv[3]: output dir
|
def SortColors(nums):
    """Sort a list containing only 0s, 1s and 2s in place (Dutch national flag,
    single pass) and return it.
    """
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        current = nums[mid]
        if current == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif current == 1:
            mid += 1
        else:  # current == 2
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1
    return nums
print(SortColors([1,2]))
'''
input = 2,0,1
I1 = 1,0,2
I2 = 1,0,2
I3 =
zeros = 0
ones = 1
twos = 1
''' |
14,328 | 8387ef8e7803792aacd562748ae6217b8e2707bd | from flask_mail import Mail, Message
from flask import Flask,render_template,request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:@localhost/bugtracking'
db = SQLAlchemy(app)
class userid(db.Model):
    '''
    sno, name phone_num, msg, date, email
    '''
    # Primary key
    Pk_userid = db.Column(db.Integer, primary_key=True)
    # NOTE(review): column purposes unclear from names — presumably user name
    # and a second name/value field; confirm against the schema.
    name = db.Column(db.String(50), nullable=False)
    name1 = db.Column(db.String(50), nullable=False)
# SECURITY: SMTP credentials are hardcoded in source — move MAIL_USERNAME /
# MAIL_PASSWORD to environment variables or a secrets store.
app.config.update(
    DEBUG=True,
    #EMAIL SETTINGS
    MAIL_SERVER='smtp.gmail.com',
    MAIL_PORT=465,
    MAIL_USE_SSL=True,
    MAIL_USERNAME = 'parthprecise11@gmail.com',
    MAIL_PASSWORD = '8619130803'
)
mail = Mail(app)
@app.route('/send-mail/')
def send_mail():
    """Send a test HTML email (rendered from about.html) and return "hello"."""
    msg = Message('Hel9l', sender='parthprecise11@gmail.com', recipients=['parthdarak11@gmail.com'])
    msg.html = render_template('about.html', name='ghgh')
    mail.send(msg)
    return "hello"
@app.route('/hello/')
def hello():
    """Trivial health-check route."""
    return "hello welcome"
@app.route('/cds/')
def cds():
    """Set name1='Azure Radiance' on the userid row whose name matches the
    `email` query parameter.

    Raises sqlalchemy NoResultFound/MultipleResultsFound if not exactly one row matches.
    """
    email = request.args.get('email')
    # Bug fix: the query result was discarded, leaving `q` undefined (NameError).
    q = db.session.query(userid)
    q = q.filter(userid.name == email)
    record = q.one()
    record.name1 = 'Azure Radiance'
    db.session.commit()  # persist the change; original never committed
    return "updated"  # Flask views must return a response; original returned None
app.run(debug=True) |
14,329 | 47decb45d173846eb77af2e8161dd6d08aee85df | import sys
sys.path.append("../model")
sys.path.append("../data")
import numpy as np
import model_params, load_data
# params
params = model_params.BAI_PARAMS
def generate_test_data(dataset, inp_lang, targ_lang):
    """Generate a random batch of (inputs, targets) token-id matrices shaped
    like real seq2seq data: each row starts with <start>, ends with <end>,
    and is zero-padded to max length. `dataset` is unused here.
    """
    # init data and dict
    # print("<start> id:", inp_lang.word2idx['<start>'])
    # print("<end> id:", inp_lang.word2idx['<end>'])
    # Simulate normal inputs
    inputs = np.random.randint(low=1, high=params['vocab_size_input'],
                               size=(params['default_batch_size'], params['max_length_input']))
    # Random cut position near the end of each row marks where padding begins.
    rand = np.random.randint(low=2, high=8, size=(params['default_batch_size'], ))
    # print("rand:\n", rand)
    inputs[np.arange(0, params['default_batch_size']), -rand] = 0.
    for i in range(params['default_batch_size']):
        inputs[i, 0] = inp_lang.word2idx['<start>']
        for j in range(0, params['max_length_input']):
            if inputs[i, j] == 0:
                # Place <end> just before the first zero, zero out the rest.
                inputs[i, j-1] = inp_lang.word2idx['<end>']
                for k in range(j, params['max_length_input']):
                    inputs[i, k] = 0
                break
    # print(inputs)
    # Simulate normal targets (same construction as inputs)
    targets = np.random.randint(low=1, high=params['vocab_size_output'],
                                size=(params['default_batch_size'], params['max_length_output']))
    rand = np.random.randint(low=2, high=6, size=(params['default_batch_size'], ))
    # print("rand:\n", rand)
    targets[np.arange(0, params['default_batch_size']), -rand] = 0.
    for i in range(params['default_batch_size']):
        targets[i, 0] = targ_lang.word2idx['<start>']
        for j in range(0, params['max_length_output']):
            if targets[i, j] == 0:
                targets[i, j-1] = targ_lang.word2idx['<end>']
                for k in range(j, params['max_length_output']):
                    targets[i, k] = 0
                break
    # print(targets)
    return inputs, targets
if __name__ == '__main__':
    # NOTE(review): generate_test_data requires (dataset, inp_lang, targ_lang)
    # but is called with no arguments — this raises TypeError when run directly.
    generate_test_data()
|
14,330 | 97a53d24bf4c7a4a36354d4169c7a2a1845c499b | import numpy as np
def gcd(a, b):
    """Greatest common divisor of *a* and *b* via Euclid's algorithm.

    Iterative (no recursion-depth limit) and generalized to handle b == 0,
    on which the original recursive version raised ZeroDivisionError.
    """
    while b:
        a, b = b, a % b
    return a
# Read N and X, then N coordinates; answer is gcd of all distances |x_i - X|.
N, X = map(int, input().split())
x = list(map(int, input().split()))
s = np.array(x)
s = abs(s-X)
if len(s) == 1:
    print(s[0])
    exit()
for i in range(1, len(s)):
    if i == 1:
        D = gcd(s[i],s[i-1])
    # NOTE(review): for i == 1 this recomputes gcd(s[1], D) redundantly (harmless).
    D = gcd(s[i], D)
print(D)
|
def level_order(root: int):
    """BFS from node index *root*, returning node labels in level order.

    Relies on module-level `nodes` (index -> label) and `graph` (adjacency
    lists). Deduplication compares *labels* against `res` — assumes labels
    are unique across nodes.
    """
    # queue
    q = [root]
    res = []
    while len(q) != 0:
        # Record labels of the whole current level before expanding it.
        for node in q:
            res.append(nodes[node])
        length = len(q)
        for l in range(length):
            # dequeue the nodes of the current level one by one
            node = q.pop(0)
            for next_node in graph[node]:
                if nodes[next_node] not in res and next_node not in q:
                    q.append(next_node)
    return res
if __name__ == '__main__':
    ans = []
    # One test case per line-count on the first input line.
    for i in range(eval(input())):  # NOTE(review): eval on input — int(input()) would be safer
        tmp = input().split()
        n = int(tmp[0])       # number of nodes
        src = tmp[1]          # label of the start node
        nodes = input().split()
        graph = [[] for j in range(n)]
        for j in range(n):
            line = input().split()
            # line[0] presumably a degree count; edges added undirected.
            for k in range(1, len(line)):
                graph[j].append(int(line[k]))
                graph[int(line[k])].append(j)
        ans.append(level_order(nodes.index(src)))
    for i in ans:
        for j in range(len(i) - 1):
            print(i[j], end=' ')
        print(i[-1])
14,332 | 51c9e995a87a3deaa4a311515a8ee59b99635370 | __author__ = 'hi_melnikov'
import tornado.web
import tornado.httpserver
import web_game
import web_site
import os
class MainHandler(tornado.web.RequestHandler):
    """Delegates / requests to web_site."""
    def get(self):
        web_site.get_MainHandler(self)
    def post(self):
        web_site.post_MainHandler(self)
class LoginHandler(tornado.web.RequestHandler):
    """Delegates /login requests to web_site."""
    def get(self):
        web_site.get_LoginHandler(self)
    def post(self):
        web_site.post_LoginHandler(self)
class RegHandler(tornado.web.RequestHandler):
    """Delegates /regestration requests to web_site."""
    def get(self):
        web_site.get_RegHandler(self)
    def post(self):
        web_site.post_RegHandler(self)
class PlayerLobboyHandler(tornado.web.RequestHandler):
    """Delegates /playerlobby requests to web_site.

    NOTE(review): "Lobboy" is presumably a typo for "Lobby"; kept because the
    name is referenced in Application's route table.
    """
    def get(self):
        web_site.get_PlayerLobboyHandler(self)
    def post(self):
        web_site.post_PlayerLobboyHandler(self)
class GameHandler(tornado.web.RequestHandler):
    """Delegates /game GET requests to web_game."""
    def get(self):
        web_game.get_GameHandler(self)
class GameListHandler(tornado.web.RequestHandler):
    """Delegates /gamelist GET requests to web_game."""
    def get(self):
        web_game.get_GameListHandler(self)
class StateHandler(tornado.web.RequestHandler):
    """Delegates /state GET requests to web_game."""
    def get(self):
        web_game.get_StateHandler(self)
class StatsHandler(tornado.web.RequestHandler):
    """Delegates /stats GET requests to web_game."""
    def get(self):
        web_game.get_StatsHandler(self)
class LeaderBoardHandler(tornado.web.RequestHandler):
    """Delegates /leaderboard GET requests to web_game."""
    def get(self):
        web_game.get_LeaderBoardHandler(self)
class Application(tornado.web.Application):
    """Tornado application wiring URL patterns to the handlers above,
    plus a static-file route for /styles/.
    """
    def __init__(self):
        handlers = [(r"/", MainHandler),
                    (r"/login", LoginHandler),
                    (r"/regestration", RegHandler),
                    (r"/playerlobby", PlayerLobboyHandler),
                    (r"/game", GameHandler),
                    (r"/gamelist", GameListHandler),
                    (r"/state", StateHandler),
                    (r"/stats", StatsHandler),
                    (r"/leaderboard", LeaderBoardHandler),
                    (r'/styles/(.*)', tornado.web.StaticFileHandler,
                     {'path': os.path.dirname(__file__)+"styles/"}),]
        settings = {}
        super(Application, self).__init__(handlers, **settings)
def main():
    """Start the HTTP server on port 8888 and run the IOLoop forever."""
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
14,333 | 3b9c43f4840ccecd40e152ecf90daec157d7cc9a | import unittest
from unittest import mock
import cherry
class ApiTest(unittest.TestCase):
    """Verifies that cherry's top-level API functions forward their arguments
    (including documented defaults) to the underlying implementation classes.
    """

    def setUp(self):
        self.model = 'foo'
        self.text = 'random string'

    @mock.patch('cherry.api.Classify')
    def test_classify_api(self, mock_classify):
        cherry.classify(model=self.model, text=self.text)
        mock_classify.assert_called_once_with(model=self.model, text=self.text)

    @mock.patch('cherry.api.Trainer')
    def test_train_api(self, mock_train):
        cherry.train(model=self.model)
        mock_train.assert_called_once_with(
            self.model, categories=None, clf=None, clf_method='MNB',
            encoding=None, language='English', preprocessing=None, vectorizer=None,
            vectorizer_method='Count', x_data=None, y_data=None)

    @mock.patch('cherry.api.Performance')
    def test_performance_api(self, mock_performance):
        cherry.performance(model=self.model)
        mock_performance.assert_called_once_with(
            self.model, categories=None, clf=None, clf_method='MNB', encoding=None,
            language='English', n_splits=10, output='Stdout', preprocessing=None,
            vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)

    @mock.patch('cherry.api.Performance')
    def test_performance_api_model_clf_vectorizer(self, mock_performance):
        # Explicit clf/vectorizer must pass through unchanged.
        cherry.performance('foo', clf='clf', vectorizer='vectorizer')
        mock_performance.assert_called_with(
            'foo', categories=None, clf='clf', clf_method='MNB',
            encoding=None, language='English', n_splits=10,
            output='Stdout', preprocessing=None, vectorizer='vectorizer',
            vectorizer_method='Count', x_data=None, y_data=None)

    # @mock.patch('cherry.api.Search')
    # def test_search_api(self, mock_search):
    #     cherry.search(model='harmful', parameters={})
    #     mock_search.assert_called_once_with(
    #         'harmful', clf=None, clf_method=None, cv=3, iid=False, method='RandomizedSearchCV',
    #         n_jobs=1, parameters={}, vectorizer=None, vectorizer_method=None, x_data=None, y_data=None)

    @mock.patch('cherry.api.Display')
    def test_display_api(self, mock_display):
        cherry.display(model=self.model)
        mock_display.assert_called_once_with(
            self.model, categories=None, clf=None, clf_method='MNB',
            encoding=None, language='English', preprocessing=None,
            vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)
|
14,334 | 6e05ef1f1e1322e8c31cbf9ce9d1e071338c357c | import datetime
from fHDHR.exceptions import EPGSetupError
class Plugin_OBJ():
    def __init__(self, channels, plugin_utils):
        """Store the fHDHR plugin utilities (config, web, db, logger) and channel list."""
        self.plugin_utils = plugin_utils
        self.channels = channels
@property
def postalcode(self):
if self.plugin_utils.config.dict["tvtv"]["postalcode"]:
return self.plugin_utils.config.dict["tvtv"]["postalcode"]
try:
postalcode_url = 'http://ipinfo.io/json'
postalcode_req = self.plugin_utils.web.session.get(postalcode_url)
data = postalcode_req.json()
postalcode = data["postal"]
except Exception as e:
raise EPGSetupError("Unable to automatically optain postalcode: %s" % e)
postalcode = None
return postalcode
    @property
    def lineup_id(self):
        """First lineup ID returned by tvtv.us for the configured postal code
        (optionally filtered by configured lineup type).
        """
        lineup_id_url = "https://www.tvtv.us/tvm/t/tv/v4/lineups?postalCode=%s" % self.postalcode
        if self.plugin_utils.config.dict["tvtv"]["lineuptype"]:
            lineup_id_url += "&lineupType=%s" % self.plugin_utils.config.dict["tvtv"]["lineuptype"]
        lineup_id_req = self.plugin_utils.web.session.get(lineup_id_url)
        data = lineup_id_req.json()
        lineup_id = data[0]["lineupID"]
        return lineup_id
    def update_epg(self):
        """Build the program guide dict (channel number -> channel info + listings)
        from cached tvtv grid data covering yesterday through six days ahead.
        """
        programguide = {}
        # Make a date range to pull
        todaydate = datetime.date.today()
        dates_to_pull = []
        for x in range(-1, 6):
            datesdict = {
                "start": todaydate + datetime.timedelta(days=x),
                "stop": todaydate + datetime.timedelta(days=x+1)
            }
            dates_to_pull.append(datesdict)
        self.remove_stale_cache(todaydate)
        cached_items = self.get_cached(dates_to_pull)
        for result in cached_items:
            for chan_item in result:
                # Channel keyed as "major.minor".
                channel_number = "%s.%s" % (chan_item["channel"]['channelNumber'], chan_item["channel"]['subChannelNumber'])
                if str(channel_number) not in list(programguide.keys()):
                    programguide[channel_number] = {
                        "callsign": chan_item["channel"]["callsign"],
                        "name": chan_item["channel"]["name"],
                        "number": channel_number,
                        "id": str(chan_item["channel"]["stationID"]),
                        "thumbnail": None,
                        "listing": [],
                    }
                    if chan_item["channel"]["logoFilename"]:
                        programguide[channel_number]["thumbnail"] = "https://cdn.tvpassport.com/image/station/100x100/%s" % chan_item["channel"]["logoFilename"]
                for listing in chan_item["listings"]:
                    timestamp = self.tvtv_timestamps(listing["listDateTime"], listing["duration"])
                    clean_prog_dict = {
                        "time_start": timestamp['time_start'],
                        "time_end": timestamp['time_end'],
                        "duration_minutes": listing["duration"],
                        "thumbnail": None,
                        "title": listing["showName"],
                        "sub-title": listing["episodeTitle"],
                        "description": listing["description"],
                        "rating": listing["rating"],
                        "episodetitle": listing["episodeTitle"],
                        "releaseyear": listing["year"],
                        "genres": [],
                        "seasonnumber": None,
                        "episodenumber": None,
                        "isnew": listing["new"],
                        "id": listing["listingID"],
                    }
                    # NOTE(review): the poster URL is written back into `listing`
                    # but never copied into clean_prog_dict["thumbnail"], which
                    # stays None — likely unintended; confirm.
                    if listing["artwork"]["poster"]:
                        listing["artwork"]["poster"] = "https://cdn.tvpassport.com/image/show/480x720/%s" % listing["artwork"]["poster"]
                    # Skip duplicates (same start time and listing id).
                    if not any((d['time_start'] == clean_prog_dict['time_start'] and d['id'] == clean_prog_dict['id']) for d in programguide[channel_number]["listing"]):
                        programguide[channel_number]["listing"].append(clean_prog_dict)
        return programguide
def tvtv_timestamps(self, starttime, duration):
start_time = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M:%S').timestamp()
timestamp = {
"time_start": start_time,
"time_end": start_time + (duration * 60)
}
return timestamp
def get_cached(self, dates_to_pull):
    """Ensure listings for every requested day are cached, then return the
    cached payload for each key currently in the cache index."""
    for window in dates_to_pull:
        start_stamp = "%s%s" % (window["start"], "T00%3A00%3A00.000Z")
        stop_stamp = "%s%s" % (window["stop"], "T00%3A00%3A00.000Z")
        listing_url = ("https://www.tvtv.us/tvm/t/tv/v4/lineups/%s/listings/grid?start=%s&end=%s"
                       % (self.lineup_id, start_stamp, stop_stamp))
        self.get_cached_item(str(window["start"]), listing_url)
    cached_keys = self.plugin_utils.db.get_plugin_value("cache_list", "epg_cache", "tvtv") or []
    results = []
    for key in cached_keys:
        results.append(self.plugin_utils.db.get_plugin_value(key, "epg_cache", "tvtv"))
    return results
def get_cached_item(self, cache_key, url):
    """Return the listings JSON for `cache_key`, fetching and caching it on
    a cache miss.

    Bug fix: the fetch path previously stored the payload but fell off the
    end of the function, returning None even on success, while the
    cache-hit path returned the payload. Both paths now return it.
    Returns None only when the HTTP request fails.
    """
    cacheitem = self.plugin_utils.db.get_plugin_value(cache_key, "epg_cache", "tvtv")
    if cacheitem:
        self.plugin_utils.logger.info("FROM CACHE: %s" % cache_key)
        return cacheitem
    self.plugin_utils.logger.info("Fetching: %s" % url)
    try:
        resp = self.plugin_utils.web.session.get(url)
    except self.plugin_utils.web.exceptions.HTTPError:
        # Deliberate best-effort: a failed day is simply skipped.
        self.plugin_utils.logger.info('Got an error! Ignoring it.')
        return None
    result = resp.json()
    self.plugin_utils.db.set_plugin_value(cache_key, "epg_cache", result, "tvtv")
    cache_list = self.plugin_utils.db.get_plugin_value("cache_list", "epg_cache", "tvtv") or []
    cache_list.append(cache_key)
    self.plugin_utils.db.set_plugin_value("cache_list", "epg_cache", cache_list, "tvtv")
    return result
def remove_stale_cache(self, todaydate):
    """Delete cached day entries dated before `todaydate` and rewrite the
    cache index with only the surviving keys."""
    cached_keys = self.plugin_utils.db.get_plugin_value("cache_list", "epg_cache", "tvtv") or []
    cutoff = datetime.datetime.strptime(str(todaydate), "%Y-%m-%d")
    surviving = []
    for key in cached_keys:
        key_date = datetime.datetime.strptime(str(key), "%Y-%m-%d")
        if key_date < cutoff:
            self.plugin_utils.db.delete_plugin_value(key, "epg_cache", "tvtv")
            self.plugin_utils.logger.info("Removing stale cache: %s" % key)
        else:
            surviving.append(key)
    self.plugin_utils.db.set_plugin_value("cache_list", "epg_cache", surviving, "tvtv")
def clear_cache(self):
    """Delete every cached day entry and then the cache index itself."""
    cached_keys = self.plugin_utils.db.get_plugin_value("cache_list", "epg_cache", "tvtv") or []
    for key in cached_keys:
        self.plugin_utils.db.delete_plugin_value(key, "epg_cache", "tvtv")
        self.plugin_utils.logger.info("Removing cache: %s" % str(key))
    self.plugin_utils.db.delete_plugin_value("cache_list", "epg_cache", "tvtv")
|
14,335 | 7112b26d11109f8063757df724c06da8d2357ab6 | ################################################################################
# NAME: Tyler Quayle #
# ASSIGNMENT: 2 #
# PART: Files, Masks and plotting #
################################################################################
"""
This program will read in the given file ASFG_Ts.txt in order to comb the
data to find bad entries, whether they are missing delimiters or values. I
added the function to write the bad data to a file for investigation later.
After combing is done, display the results of the given file.
"""
import os
import numpy as N
import matplotlib.pyplot as plt
# writing it to Canopy default address
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
readIn = open(os.path.join(__location__, 'ASFG_Ts.txt'), 'r')
errorFile = open(os.path.join(__location__, 'Bad_Data.txt'), 'w')
successFile = open(os.path.join(__location__, 'Successful_Data.txt'), 'w')
data = readIn.readlines() # Get every line from ASFG_Ts.txt and put into a list
julDate = [] # Julian Date List
lat = [] # Latitude List
lon = [] # Longitude List
tem = [] # Temperature List
# Go thru data list, find any bad data and append to Bad_Data.txt. any 'good'
# data, append to Successful_Data.txt.
for j in data:
try:
err_check = j.split("\t")
#Check to see if any of the split items contain nothing in the case that
# there are the correct amount of \t's but nothing inbetween
if(float(err_check[1]) != None and
float(err_check[2]) != None and
float(err_check[3]) != None):
# Append the 'good' data to the correct lists
julDate.append(float(err_check[0]))
lat.append(float(err_check[1]))
lon.append(float(err_check[2]))
tem.append(float(err_check[3])) #
successFile.write(j)
except ValueError: # The split function found words, not numbers
errorFile.write(j)
except IndexError: # The split function did not find 4 pieces of dats
errorFile.write(j)
mean = sum(tem)/len(tem) # find Mean of Temp list.
median = N.median(tem) # Find median of Temp list, didn't know
deviation = (sum([(i-mean)**2 for i in tem])/(len(tem)))**.5
print "Records read in: ", len(data)
print "Number of good Records: ", len(tem)
print "Temp Mean: ", mean
print "Temp Median: ", median
print "Standard Deviation: ", deviation
# Creat plot with date being X-Axis and Y-Axis being recorded temp for that date
plt.plot(julDate, tem)
plt.xlabel("Julian Date")
plt.ylabel("Temp")
plt.title("Surface Heat Budget of the Arctic")
plt.show()
# CLOSE ALL FILES
readIn.close()
errorFile.close()
successFile.close() |
def factorial(n):
    """Return n! computed iteratively (n >= 0)."""
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def find_factorial_root(m):
    """Return k such that k! == m, or None when m is not a factorial.

    Fixes two defects in the original trial-division loop: m = 0 looped
    forever (0 is divisible by every x, so the running product grew
    without bound), and m = 1 was wrongly reported as not a factorial.
    """
    k = 1
    f = 1
    # Grow k! until it reaches or passes m; products are strictly
    # increasing, so this always terminates.
    while f < m:
        k += 1
        f *= k
    return k if f == m else None


if __name__ == "__main__":
    n = int(input("Enter number to get factorial: "))
    print("Factorial ", n, " equils ", factorial(n))
    target = int(input("Check if a number is a factorial of any number.\nEnter number for check: "))
    root = find_factorial_root(target)
    if root is not None:
        print("Yes! The given number is a factorial of ", root)
    else:
        print("The given number is not a factorial.")
|
14,337 | 6e59651bec1d8d8cd15a4b0adac85870885bd53e | import unittest
from project.tests.base import BaseTestCase
from project.models import *
class TestMessageDatabase(BaseTestCase):
    """Integration tests for the User / Message_group / Message models.

    Relies on BaseTestCase to provide a fresh app context and database
    session (`db`) per test.
    """

    def test_is_test_running(self):
        # Sanity check that the runner picks up this suite at all.
        self.assertEqual(0, 0)

    def add_user(self, username, password):
        # Helper (not a test): create, persist and return a User.
        u = User(username=username,password=password)
        db.session.add(u)
        db.session.commit()
        return u

    def test_user_password_is_hashed_and_check_password_works(self):
        # The stored hash must never equal the plaintext, and hashes for
        # different users must differ (i.e. salted).
        u = self.add_user("potato","chip")
        self.assertFalse(u.user_password_hash == "chip")
        self.assertTrue(u.check_password("chip"))
        self.assertFalse(u.check_password("wrongpassword"))
        u2 = self.add_user("chip","chocolate")
        self.assertFalse(u2.user_password_hash == "chocolate")
        self.assertFalse(
            u2.user_password_hash == u.user_password_hash
        )

    def add_message_group(self,group_name, creator):
        # Helper: create and persist a Message_group owned by `creator`.
        new_message_group = Message_group(
            group_name=group_name,
            creator=creator
        )
        db.session.add(new_message_group)
        db.session.commit()
        return new_message_group

    def test_user_create_message_group(self):
        # A fresh group has its creator as sole member, a creation date,
        # and no posts yet.
        u = self.add_user("potato","chip")
        new_message_group = self.add_message_group("bunny slayers", u)
        self.assertEqual(len(Message_group.query.all()), 1)
        self.assertEqual(len(new_message_group.members), 1)
        self.assertNotEqual(new_message_group.date_created, None)
        self.assertEqual(new_message_group.time_most_recent_post, None)
        self.assertEqual(new_message_group.creator, u.id)

    def add_message(self, message_group, user, message):
        # Helper: create and persist a Message in `message_group`.
        new_message = Message(message_group, user, message)
        db.session.add(new_message)
        db.session.commit()
        return new_message

    def test_messages_can_be_created(self):
        u = self.add_user("potato","chip")
        message_group = self.add_message_group("bunnies", u)
        new_message = self.add_message(message_group, u, "hi there")
        self.assertEqual(len(message_group.messages), 1)
        self.assertIn("hi there", message_group.messages[0].message)

    def test_multple_users_in_groups(self):
        # Membership must be visible from both sides of the many-to-many:
        # group.members and user.groups.
        u = self.add_user("potato","chip")
        message_group = self.add_message_group("bunnies", u)
        members = [("chocolate", "chip"), ("poker","chip"), ("pumpkin", "pie")]
        for member in members:
            new_member = self.add_user(member[0],member[1])
            message_group.members.append(new_member)
        self.assertEqual((len(members) + 1), len(message_group.members))
        member_names = [member.username for member in message_group.members]
        for member in members:
            self.assertIn(member[0], member_names)
        for member in members:
            m = User.query.filter_by(username=member[0]).first()
            self.assertTrue(message_group in m.groups)


# Allow running this module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
def count_increases(depths):
    """Return how many consecutive measurements are strictly increasing.

    Fixes the original script, which compared the raw file lines (str)
    against the previous value seeded with the int 150 -- a TypeError on
    Python 3 -- and hard-coded the file's first value as the seed.
    """
    increases = 0
    for prev, cur in zip(depths, depths[1:]):
        if cur > prev:
            increases += 1
    return increases


if __name__ == "__main__":
    # Advent of Code 2021 day 1: one integer depth per line.
    with open("01.txt", "r") as f:
        depths = [int(line) for line in f.read().splitlines()]
    print(count_increases(depths))
|
14,339 | b2f9387801d5d9a70d9b83af101366772d5f28e9 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# Choices for UserProfile.gender: (stored value, human-readable label).
GENDERS = (
    (u'F', u'Female'),
    (u'M', u'Male'),
)


class UserProfile(models.Model):
    """Per-user profile data attached one-to-one to Django's auth User."""
    user = models.OneToOneField(User, unique=True)
    gender = models.CharField(max_length=1, choices=GENDERS, default=u'M')
    # NOTE(review): null=True has no effect on a ManyToManyField, and a
    # self-referencing M2M is symmetrical by default -- confirm the
    # related_name here is actually used anywhere.
    friends = models.ManyToManyField("self", related_name="friends", null=True, blank=True)
    phone = models.CharField(max_length=12, null=True, blank=True)

    def __unicode__(self):
        # Display the owning user's name in admin/shell listings.
        return str(self.user.username)


def create_user_profile(sender, instance, created, **kwargs):
    # Signal handler: auto-create an empty profile for every new auth User.
    if created:
        UserProfile.objects.create(user=instance)

post_save.connect(create_user_profile, sender=User)
14,340 | 3e357218abed9776d9ef20e296ab3f774932d30b | import glob
import os
import constants
class RequestGenerator:
    """Lists the entries found in a trace-file directory (Python 2 code).

    NOTE(review): despite its name, readTraceFile() only prints the entry
    names; it does not read or return any trace contents.
    """

    def __init__(self, traceFileDirectory):
        # Directory to scan; expected to end with a path separator, since
        # it is concatenated directly with '*' for globbing below.
        self.traceFileDirectory = traceFileDirectory

    def readTraceFile(self):
        '''
        Collects all the files in the trace directory
        '''
        files = glob.glob(self.traceFileDirectory + '*')
        # NOTE(review): the loop variable shadows the Python 2 builtin `file`.
        for file in files:
            if os.path.isdir(file): print os.path.basename(file) + '/' # we may need to recursively print from directories
            else: print os.path.basename(file)


if __name__ == '__main__':
    traceFileDirectory = 'inputs/'
    rg = RequestGenerator(traceFileDirectory)
    rg.readTraceFile()
|
14,341 | 628097e20ca4bdb6d546f118945a54cdd61c97fe | from django.contrib import admin
from .models import TreeType,Tree
# Register your models here.
# Expose TreeType and Tree in the Django admin with default ModelAdmin options.
admin.site.register(TreeType)
admin.site.register(Tree)
14,342 | 5a4e7c7c8f6b08c86529a05c2f91edffeffeacc8 | from sparks.blob_reader import BlobReader
from sparks.blob_saver import BlobSaver
from multiprocessing import JoinableQueue, Process, Event
from sparks import utils
from tempfile import NamedTemporaryFile
import time
import os
import sys
import subprocess
# Azure Blob Storage coordinates for the input (Camelyon16) and output
# (processing markers) containers.
# SECURITY NOTE(review): storage account keys are hard-coded in source.
# They should be rotated immediately and moved to environment variables or
# a secrets store.
IN_ACC = "camelyon16data"
IN_KEY = "5juqtl5oUnYS3W7CRX3qNfCnYp5ReEh1RHv7AEMIx9Nu9ryL7K7xL/4y7vOH6aN/SFh5CeSaIognarZaRyeTnA=="
IN_CONTAINER = "camelyon16"
IN_PREPATH = "TrainingData/Train_"
OUT_PREPATH = None
OUT_CONTAINER = "otsu"
OUT_ACC = "samplessw"
OUT_KEY = "2wr3eLjg+olIVZmEyGF+FUEBLO0KyXgcv2NgXslQmcnR5Lrv1egHbDXstSNXKu+BzgvU2XNgUo6lRRX/dVbrUA=="
def check_for_mask(blob):
    """Return True when the blob name refers to a tumour slide (and hence
    has a companion ground-truth mask to download).

    Bug fix: the old test `blob.lower().find("tumor") > 0` missed a name
    that *starts* with "tumor", because str.find() returns 0 there; a
    plain substring test is the intended check.
    """
    return "tumor" in blob.lower()
def download_blob(blobs, args):
    """Worker body: pull one blob off `blobs`, download it (plus its
    ground-truth mask when it is a tumour slide) to local temp files, and
    push the paths onto the processing queue.

    args = ((account, key, container), files_queue).
    """
    acc, key, container = args[0]
    files = args[1]
    blob = blobs.get()
    reader = BlobReader(acc, key, container)
    mask = None
    mask_name = None
    if (check_for_mask(blob.name)):
        # Tumour slides have a companion mask blob keyed by slide number.
        number = os.path.basename(blob.name).split(".")[0].split("_")[1]
        mask = ("TrainingData/Ground_Truth/Mask/Tumor_%s_Mask.tif" % (number))
        print("Downloading %s ..." % (mask))
        # NOTE(review): "C://temp" here vs "C:/temp" below -- both resolve
        # on Windows, but the inconsistency looks accidental.
        mask_handle = NamedTemporaryFile(dir="C://temp", delete=False)
        reader.to_file(mask_handle, mask)
        mask_name = mask_handle.name
    print("Downloading %s ..." % (blob.name))
    handle = NamedTemporaryFile(dir="C:/temp", delete=False)
    reader.to_file(handle, blob.name)
    # Tuple protocol consumed by process():
    # (blob name, local path, mask blob name or None, mask local path or None)
    files.put((blob.name, handle.name, mask, mask_name))
    blobs.task_done()
def process(files, args):
    """Consume one downloaded work item from `files`, run the external
    `runner` script on it, and on success upload an "OK" marker blob so
    the item is skipped on future runs.

    args = (runner_script_path, (account, key, container)).
    Returns silently when the queue yields nothing within the poll timeout.
    """
    try:
        blob_name, handle_name, mask_name, mask_handle = files.get(timeout=0.3)
    except Exception:  # queue.Empty on timeout -- nothing to do this round
        return
    runner = args[0]
    acc, key, container = args[1]
    filename = os.path.basename(blob_name).split(".")[0]
    prefix = runner.split(".")[0]
    outname = prefix + "_" + filename
    threshname = "./otsu_" + filename
    with open(handle_name, "r") as handle:
        print("Processing... %s" % (handle_name))
        command = ["python", runner, handle_name, threshname]
        if mask_handle:
            command.append(mask_handle)
        command.append("/ssd_data/samples_best")
        ret = subprocess.call(command)
    # Always remove the temporary downloads, whether or not the runner worked.
    os.remove(handle_name)
    if mask_handle:
        os.remove(mask_handle)
    if ret == 0:
        # Upload the done-marker only on success.
        saver = BlobSaver(acc, key, container, "")
        saver(os.path.basename(outname), "OK")
    else:
        # Bug fix: previously the "OK" marker was uploaded even when the
        # runner failed, permanently skipping the failed item.
        print("Processing failed for %s." % (handle_name))
    files.task_done()
if __name__ == "__main__":
    # Usage: python <script> <runner.py>
    args = sys.argv[1:]
    if len(args) == 0:
        raise AttributeError("Missing runner.")
    RUNNER = args[0]
    # Work queues: blobs waiting to be downloaded, and downloaded files
    # awaiting processing (bounded at 5 to cap temp-disk usage).
    downloads = JoinableQueue()
    files = JoinableQueue(5)
    download_event = Event()
    process_event = Event()
    in_reader = BlobReader(IN_ACC, IN_KEY, IN_CONTAINER)
    blobs = in_reader.list(IN_PREPATH)
    out_reader = BlobReader(OUT_ACC, OUT_KEY, OUT_CONTAINER)
    outs = out_reader.list(OUT_PREPATH)
    print(len(blobs), len(outs))
    for x in outs:
        print(x.name)
    for x in blobs:
        print(x.name)
    # Skip input blobs that already have a done-marker in the output
    # container (marker names are "<prefix>_<slide name>").
    blobs = list(filter(lambda x: os.path.basename(x.name).split(".")[0] not in
                        list(map(lambda x: "_".join(x.name.split("_")[1:]).split(".")[0],
                                 outs)), blobs))
    print(len(blobs))
    list(map(lambda x: downloads.put(x), blobs))
    # Two downloader processes feeding one processor process.
    downloaders = map(lambda x: Process(target=utils.consume,
                                        args=(downloads,
                                              download_event,
                                              ((IN_ACC, IN_KEY, IN_CONTAINER),
                                               files),
                                              download_blob)),
                      range(0, 2))
    list(map(lambda proc: proc.start(), downloaders))
    processors = map(lambda x: Process(target=utils.consume,
                                       args=(files,
                                             process_event,
                                             (RUNNER,
                                              (OUT_ACC,
                                               OUT_KEY,
                                               OUT_CONTAINER)),
                                             process)),
                     range(0, 1))
    list(map(lambda proc: proc.start(), processors))
    # Wait for both queues to drain, then signal the workers to exit.
    downloads.join()
    files.join()
    download_event.set()
    process_event.set()
14,343 | fa6e1942160a6d6032914c3cb301e37f615c03c0 | #import nester
import pickle
man = []    # lines spoken by "Man"
other = []  # lines spoken by "Other Man"

# Read the sketch transcript and bucket each spoken line by role.
# The `with` statement guarantees the file is closed even if an error is
# raised mid-iteration (the old open()/close() pair leaked the handle on
# failure paths).
try:
    with open('sketch.txt') as data:
        for each_line in data:
            try:
                (role, line_spoken) = each_line.split(':', 1)
                line_spoken = line_spoken.strip()
                if (role == 'Man'):
                    man.append(line_spoken)
                elif (role == 'Other Man'):
                    other.append(line_spoken)
            except ValueError:
                # Line had no ':' separator (stage direction etc.) -- skip.
                pass
except IOError:
    print('The datafile is missing!')

# Persist each role's lines with pickle.
try:
    with open('man_data.txt', 'wb') as manfile:
        pickle.dump(man, manfile)
    with open('other_data.txt', 'wb') as otherfile:
        pickle.dump(other, otherfile)
except IOError:
    print('write file error!')
except pickle.PickleError as perr:
    print('Pickle Error ' + str(perr))
14,344 | 0569fead0a10c32235f4d66b355c781302967b4c | '''
大华 NVR 接口
'''
from _ctypes import Structure, byref
from ctypes import c_ubyte, c_int, c_char_p, c_bool
from video.NvrBase import NvrBase
from video.RealPlayer import RealPlayerForm, PtzDir
# 登录用的返回结构体
# Device-info structure filled in by CLIENT_LoginEx2 on login.
# NOTE: the field order and sizes must match the Dahua NetSDK ABI exactly;
# do not reorder or resize.
class NET_DEVICEINFO_Ex(Structure):
    _fields_ = [
        ('sSerialNumber', c_ubyte * 48),   # device serial number
        ('nAlarmInPortNum', c_int),        # number of alarm-in ports
        ('nAlarmOutPortNum', c_int),       # number of alarm-out ports
        ('nDiskNum', c_int),               # number of disks
        ('nDVRType', c_int),               # device type code
        ('nChanNum', c_int),               # number of channels
        ('byLimitLoginTime', c_ubyte),     # login attempt limit
        ('byLeftLogTimes', c_ubyte),       # login attempts remaining
        ('bReserved', c_ubyte * 2),        # padding/reserved
        ('nLockLeftTime', c_int),          # seconds until lockout expires
        ('Reserved', c_ubyte * 24),        # reserved
    ]
'''
NVR 管理类
'''
class NvrDH(NvrBase):
    """Dahua NVR driver built on the Dahua NetSDK via ctypes.

    Relies on attributes supplied by NvrBase: nvrDll (loaded SDK library),
    nvrIp / nvrPort / nvrLogin / nvrPass, userSession, wxApp, nvrId, mgr.
    Channel numbers are 1-based at this API and converted to the SDK's
    0-based indexing with `cha - 1`.
    """

    # Log in to the NVR; stores and returns the SDK session handle
    # (0 on failure per the NetSDK convention -- TODO confirm).
    def login(self):
        loginfo = NET_DEVICEINFO_Ex()
        err = NET_DEVICEINFO_Ex()
        self.userSession = self.nvrDll.CLIENT_LoginEx2(
            c_char_p(self.nvrIp.encode('ascii')),
            self.nvrPort,
            c_char_p(self.nvrLogin.encode('ascii')),
            c_char_p(self.nvrPass.encode('ascii')),
            0,
            None,
            byref(loginfo),
            byref(err)
        )
        return self.userSession

    # Open a live-preview window for channel `cha` and start the SDK
    # real-play stream rendering into the window's native handle.
    def real_play(self, cha):
        vf = RealPlayerForm(parent=self.wxApp, title="实时预览")
        vf.Show()
        vf.nvr = self
        vf.channel = cha
        vf.nvrId = self.nvrId
        vf.mgr = self.mgr
        vf.previewSession = self.nvrDll.CLIENT_RealPlay(
            self.userSession,
            cha - 1,
            vf.GetHandle()
        )
        # Register this preview with the global window list.
        vf.AddToWindowList()

    # Stop a live-preview stream previously returned by CLIENT_RealPlay.
    def real_play_stop(self, previewSession):
        self.nvrDll.CLIENT_StopRealPlay(previewSession)

    # PTZ control -- SDK prototype for reference:
    '''
    BOOL CLIENT_DHPTZControlEx2(
    LLONG lLoginID, // Long
    int nChannelID, // Long
    DWORD dwPTZCommand, // Long
    LONG lParam1,
    LONG lParam2,
    LONG lParam3,
    BOOL dwStop,
    void* param4 = NULL
    );
    '''
    # Start continuous pan/tilt movement in the given direction.
    # Command codes: 0=up 1=down 2=left 3=right, 32/33/34/35 = diagonals.
    # NOTE(review): an unrecognised direction leaves dcode at 0 (UP) --
    # confirm that fallback is intended.
    def ptz_start(self, cha, direction):
        dcode = 0
        if direction == PtzDir.UP:
            dcode = 0
        elif direction == PtzDir.RIGHT:
            dcode = 3
        elif direction == PtzDir.DOWN:
            dcode = 1
        elif direction == PtzDir.LEFT:
            dcode = 2
        elif direction == PtzDir.UP_RIGHT:
            dcode = 33
        elif direction == PtzDir.DOWN_RIGHT:
            dcode = 35
        elif direction == PtzDir.DOWN_LEFT:
            dcode = 34
        elif direction == PtzDir.UP_LEFT:
            dcode = 32
        self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, dcode, 0, 4, 0, bool(0), None)

    # Stop PTZ movement.  NOTE(review): always sends command 34 with the
    # stop flag regardless of which movement was started -- confirm the SDK
    # treats any stop command as "stop all motion".
    def ptz_stop(self, cha):
        self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 34, 0, 4, 0, bool(1), None)

    # Zoom: command 4 = zoom in (direction > 0), 5 = zoom out.
    # NOTE(review): the unconditional stop below always issues command 5
    # (zoom out) even after a zoom-in -- confirm against SDK semantics.
    def zoom(self, cha, direction):
        if direction > 0:
            self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 4, 0, 8, 0, bool(0), None)
        else:
            self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 5, 0, 8, 0, bool(0), None)
        self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 5, 0, 8, 0, bool(1), None)

    # Recall PTZ preset number `ptz` (command 10).
    def goPtz(self, cha, ptz):
        self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 10, 0, ptz, 0, bool(0), None)
|
14,345 | ba75b7db928d71757b89c9390e392b9f81c292d7 | from flask import jsonify
from microcosm_flask.formatting.base import BaseFormatter
class JSONFormatter(BaseFormatter):
    """Formatter that renders response data as JSON via Flask's jsonify."""

    CONTENT_TYPE = "application/json"

    @property
    def content_type(self):
        # Reads the attribute through the class itself, so a subclass
        # overriding CONTENT_TYPE would NOT change this value.
        # NOTE(review): if overrides should be honoured, use
        # `type(self).CONTENT_TYPE` instead.
        return JSONFormatter.CONTENT_TYPE

    def build_response(self, response_data):
        # Delegate serialisation and response construction to Flask.
        return jsonify(response_data)
|
14,346 | 36c17836ee0685f4076fe0702f667eb989e02c85 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2018-03-21 19:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import filer.fields.image
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header): describes the
    # schema change declaratively.  Avoid hand-editing an applied migration.

    dependencies = [
        ('cms', '0014_auto_20160404_1908'),
        ('datacenterlight', '0013_dclnavbarpluginmodel'),
    ]

    operations = [
        # New CMS plugin model for a "Promo Section" with optional heading,
        # subheading, background image and layout flags.
        migrations.CreateModel(
            name='DCLSectionPromoPluginModel',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                                                       parent_link=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('heading', models.CharField(
                    blank=True, help_text='An optional heading for the Promo Section', max_length=100, null=True)),
                ('subheading', models.CharField(
                    blank=True, help_text='An optional subheading for the Promo Section', max_length=200, null=True)),
                ('content', djangocms_text_ckeditor.fields.HTMLField()),
                ('html_id', models.SlugField(
                    blank=True, help_text='An optional html id for the Section. Required to set as target of a link on page', null=True)),
                ('plain_heading', models.BooleanField(default=False,
                                                      help_text='Select to keep the heading style simpler.')),
                ('center_on_mobile', models.BooleanField(default=False,
                                                         help_text='Select to center align content on small screens.')),
                ('background_image', filer.fields.image.FilerImageField(blank=True, help_text='Optional background image for the Promo Section',
                                                                        null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dcl_section_promo_promo', to='filer.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
|
14,347 | 0e2cbc0f1cfa7bcd7c40c53b3f9fde696111efa3 | from django.urls import path
from yellowbird import views
from rest_framework_simplejwt import views as jwt_views
# URL routes for the yellowbird app: user endpoint plus JWT login/refresh.
urlpatterns = [
    path('user/', views.UserView.as_view()),
    # POST username/password -> access + refresh token pair.
    path('user/login/', jwt_views.TokenObtainPairView.as_view(),
         name='token_obtain_pair'),
    # POST refresh token -> new access token.
    path('user/login/refresh/', jwt_views.TokenRefreshView.as_view(),
         name='token_refresh'),
]
14,348 | fda20e3b54a1fd3542450011bad8dc23b189a403 | from collections import namedtuple, UserList, Iterable
# from jakdojade.utils import fuzzy_search, resolve_class, dotted_lowercase_get
from jakdojade import utils
class TransitList(UserList):
    """A list of transit objects searchable by fuzzy matching."""

    def search(self, value):
        """Return the fuzzy-search result for `value` over the stored items."""
        return utils.fuzzy_search(self.data, value)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.data.__repr__())

    def __call__(self, value):
        # Calling the list is shorthand for searching it.
        return self.search(value)

    def __contains__(self, value):
        # Bug fix: the truth value was computed but never returned, so
        # `x in transit_list` always evaluated None -> False.
        return bool(self.search(value))
class TransitType:
    """Base class that maps a JSON payload onto a fixed set of attributes.

    Subclasses declare:
      __slots__    -- attribute names to populate;
      _json_prefix -- prefix prepended to each slot name when looking the
                      value up in the JSON (dotted, case-tolerant lookup);
      _remaps      -- slot name -> explicit JSON key overrides;
      _recurse     -- slot name -> child TransitType class (name or class)
                      used to wrap nested payloads.
    """
    _json_prefix = ''
    _remaps = {}
    _recurse = {}

    def __init__(self, json):
        # Resolve raw values, wrap nested children, then assign to slots.
        params = dict(self._transform_json(json))
        self._transform_children(params)
        self._set_slots(params)

    def _set_slots(self, params):
        # Assign only keys that are declared slots; ignore extras silently.
        for k, v in params.items():
            if k in self.__slots__:
                setattr(self, k, v)

    @classmethod
    def _transform_json(cls, json):
        # Yield (slot, value) pairs resolved via _remaps or the prefix.
        for s in cls.__slots__:
            key = cls._remaps.get(s, cls._json_prefix + s)
            value = utils.dotted_lowercase_get(json, key)
            yield s, value

    def _transform_children(self, dictionary):
        # Replace raw child payloads with instances of their TransitType.
        for key, child_class in self._recurse.items():
            # The class may be given by name; resolve against this module.
            child_class = utils.resolve_class(child_class, globals())
            d_value = dictionary[key]
            if isinstance(d_value, Iterable):
                l = TransitList(child_class(d) for d in d_value)
                # NOTE(review): this second wrap just copies `l` into a
                # fresh TransitList; the first wrap already suffices.
                dictionary[key] = TransitList(l)
            else:
                dictionary[key] = child_class(d_value)
class City(TransitType):
    """A city with its transport operators and centre coordinate."""
    _search_attribute = 'name'
    _json_prefix = 'city'
    __slots__ = 'id', 'symbol', 'name', 'operators', 'position'
    _remaps = {'operators': 'cityTransportOperatorsArray',
               'position': 'cityCenterCoordinate'}
    # 'operators' payloads are wrapped as Operator instances.
    _recurse = {'operators': 'Operator'}
    type_name = 'city'
class Operator(TransitType):
    """A public-transport operator within a city."""
    _search_attribute = 'name'
    _json_prefix = 'transportOperator.transportOperator'
    __slots__ = 'id', 'name', 'symbol'
    type_name = 'operator'
class Stop(TransitType):
    """A single stop on a route, with its coordinate and cumulative travel
    time ('travelMinsSum' in the JSON)."""
    _search_attribute = 'name'
    __slots__ = 'code', 'name', 'time_sum', 'lat', 'lon'
    _remaps = {'time_sum': 'travelMinsSum', 'lat': 'coordinate.y_lat',
               'lon': 'coordinate.x_lon'}
    type_name = 'stop'

    @property
    def position(self):
        # (latitude, longitude) pair.
        return self.lat, self.lon
class Line(TransitType):
    """A transit line; its 'routes' slot holds one Route per direction."""
    _search_attribute = 'name'
    _remaps = {'name': 'lineSymbol', 'routes': 'directions'}
    _recurse = {'routes': 'Route'}
    __slots__ = 'name', 'routes'
    # Mirrors __slots__, presumably for namedtuple-style introspection --
    # TODO confirm a caller relies on it.
    _fields = __slots__
    type_name = 'line'
class Route(TransitType):
    """One direction of a Line, with its ordered sequence of main stops."""
    _search_attribute = 'name'
    __slots__ = 'name', 'symbol', 'stops'
    _json_prefix = 'direction'
    _remaps = {'stops': 'mainStops'}
    _recurse = {'stops': 'Stop'}
    type_name = 'route'

    @property
    def geo_direction(self):
        # Delta from first to last stop, as (start - end) in both axes:
        # positive d_lat means the route ends further south than it starts;
        # negative d_lon means it ends further east.
        start_lat, start_lon = self.stops[0].position
        end_lat, end_lon = self.stops[-1].position
        d_lat = start_lat - end_lat
        d_lon = start_lon - end_lon
        return d_lat, d_lon

    @property
    def geo_direction_name(self):
        # e.g. ['south', 'east'] -- derived from the signs of the deltas
        # above (d_lat > 0 -> heading south; d_lon < 0 -> heading east).
        d_lat, d_lon = self.geo_direction
        dest_1 = 'south' if d_lat > 0 else 'north'
        dest_2 = 'east' if d_lon < 0 else 'west'
        return [dest_1, dest_2]
# A timetable entry: departure hours, minutes and service symbols.
TableEntry = namedtuple('TableEntry', 'hours minutes symbols')
|
14,349 | 5e6918b39de1c72294dcf8ce0e65ca5cf01a0911 | #-*-coding=UTF-8 -*-
'''
Created on 2017年3月15日
@author: HP
'''
from selenium import webdriver
import unittest,time
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from time import sleep
from warnings import catch_warnings
import re
from selenium.common.exceptions import NoSuchElementException,\
NoAlertPresentException
import HTMLTestRunner
import sys
sys.path.append(r'E:/Users/HP/PycharmProjects/ZiChan100/src')
import login
from autowebdriver import AutoWebdriver
"""
class ZhangHu(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver=webdriver.Chrome()
cls.driver.implicitly_wait(20)
cls.driver.maximize_window()
cls.driver.get('http://101.201.41.60:60/platform/web/site/index')
#cls.driver.get('http://www.baidu.com')
title=cls.driver.title
print(title)
def test_denglu(self):
# self.search_denglu=self.driver.find_element_by_css_selector('div.el-form-item__content>button')
# self.search_denglu.click()
login.login(self,'lixingyu','123456')
self.seach_dengluxinxi=WebDriverWait(self.driver,5,1).until\
(expected_conditions.visibility_of_element_located((By.CSS_SELECTOR,"div.demo-block")),message='时间超时').text
print(self.seach_dengluxinxi)
def test_zhanghu(self):
'''点击系统管理菜单'''
#页面通过CSS选择器难以定位,所以获取所有DIV标签,先通过输出的文本判断是全部获得了div元素,再加个参数n,获取系统管理所在的div标签索引,得到一个div元素
self.xitongguanli=self.driver.find_elements_by_css_selector('div[class|="el"]')
#xitongguanli1=self.driver.find_element_by_link_text("系统管理")
#xitongguanli1.click()
print("cccga")
self.xitongguanli[2].click()
def test_zhuanghuguanli(self):
'''进入帐户管理页面'''
print('ddddddddddd')
#self.acountmanage=self.driver.find_element_by_css_selector('li[class$=active]')
#self.xitongguanli=self.driver.find_elements_by_css_selector('div[class|="el"]')
#self.acountmanage=self.driver.find_elements_by_css_selector('ul[class|="el"]')
#self.acountmanage=self.driver.find_elements_by_css_selector('div[class="el-submenu__title"]')
#定位帐户管理代码
self.acountmanage=self.driver.find_elements_by_css_selector('li[class="el-menu-item"]')
self.acountmanage[0].click()
def test_chaxun(self):
file=open(r'E:\zhanghuguanli.txt','r')
values=file.readlines()
file.close()
print('可以运行')
for line in values:
name=line.strip('\n')
self.zhanghuchaxun_text=self.driver.find_element_by_css_selector('input[type="text"]')#('input[placeholder="请输入内容"]')
self.zhanghuchaxun_text.clear()
self.zhanghuchaxun_text.send_keys(name)
self.zhanghuchaxun_button=self.driver.find_element_by_css_selector('button[class="el-button el-button--success el-button--mini"]')
self.zhanghuchaxun_button.click()
sleep(2)
self.zhanghuchaxun_table=self.driver.find_element_by_css_selector('div[class="el-table__body-wrapper"]>table>tbody>tr:first-child>td:nth-child(4)')
print(name+'---'+self.zhanghuchaxun_table.text)
expected_rex=re.compile('.*'+name+'.*')
self.assertRegex(self.zhanghuchaxun_table.text,expected_rex)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
if __name__=='__main__':
#unittest.main()
suite=unittest.TestSuite()
suite.addTest(ZhangHu('test_denglu'))
suite.addTest(ZhangHu('test_zhanghu'))
suite.addTest(ZhangHu('test_zhuanghuguanli'))
#suite.addTest(ZhangHu('test_chaxun'))
runner=unittest.TextTestRunner()
runner.run(suite)
# file_report=r'E:\result.html'
# fp=open(file_report,'wb')
# runnera=HTMLTestRunner.HTMLTestRunner(
# stream=fp,
# title=u'测试报告',
# description=u'用例执行情况:',
# )
# runnera.run(suite)
# fp.close()
""" |
14,350 | 920d6b10d535d91130851073c940d1dc8445d689 | from lark import Transformer
from lark.lexer import Token
class JSONTransformer(Transformer): # pragma: no cover
    """Serialise a Lark parse tree into nested JSON-style dicts.

    Each Tree becomes {"@module", "@class", "data", "children"} and each
    Token becomes {"@module", "@class", "type_", "value"}.  With
    compact=True the "@module"/"@class" markers are omitted from both.
    """
    def __init__(self, compact=False):
        # When True, strip the "@module"/"@class" keys from the output.
        self.compact = compact
        super().__init__()

    def __default__(self, data, children, meta):
        # Catch-all rule callback: fold already-transformed children (dicts)
        # and leaf Tokens into this node's dict representation.
        items = []
        for c in children:
            if isinstance(c, Token):
                token_repr = {
                    "@module": "lark.lexer",
                    "@class": "Token",
                    "type_": c.type,
                    "value": c.value,
                }
                if self.compact:
                    del token_repr["@module"]
                    del token_repr["@class"]
                items.append(token_repr)
            elif isinstance(c, dict):
                # Output of a previous __default__ call on a subtree.
                items.append(c)
            else:
                raise ValueError(f"Unknown type {type(c)} for tree child {c}")
        tree_repr = {
            "@module": "lark",
            "@class": "Tree",
            "data": data,
            "children": items,
        }
        if self.compact:
            del tree_repr["@module"]
            del tree_repr["@class"]
        return tree_repr
|
14,351 | 0cd9095300e98aeff4e48f0d373470f4bed43a7a | from collections import Counter
my_list = [10,10,10,10,20,20,20,20,40,40,50,50,30]
print(my_list)
# Count how many times each value occurs.
count_my_list = Counter(my_list)
print(count_my_list)
14,352 | 67dd6583829909bc34af509c3a8c3484dbbf8ca7 | # Module refract.py
import math
def refdry(nu, T, Pdry, Pvap):
    # From Miriad: Determine the complex refractivity of the dry components
    # of the atmosphere (oxygen lines plus non-resonant/continuum terms),
    # following Liebe's MPM formulation.
    #
    # Input:
    #    nu    = observing frequency (Hz)
    #    T     = temperature (K)
    #    Pdry  = partial pressure of dry components (Pa)
    #    Pvap  = partial pressure of water vapour (Pa)
    # Returns:
    #    complex refractivity (real = dispersion, imag = absorption term).
    #
    # Bug fix: the summation loop used Python-2-only `xrange`, which raises
    # NameError under Python 3; `range` is equivalent here (48 iterations).

    # Table of microwave oxygen lines and their parameters.
    # nu0 = line frequency (GHz); a1..a6 = Liebe line coefficients.
    nu0 = [ 49.452379,  49.962257,  50.474238,  50.987748,  51.503350,  52.021409,
            52.542393,  53.066906,  53.595748,  54.129999,  54.671157,  55.221365,
            55.783800,  56.264777,  56.363387,  56.968180,  57.612481,  58.323874,
            58.446589,  59.164204,  59.590982,  60.306057,  60.434775,  61.150558,
            61.800152,  62.411212,  62.486253,  62.997974,  63.568515,  64.127764,
            64.678900,  65.224067,  65.764769,  66.302088,  66.836827,  67.369595,
            67.900862,  68.431001,  68.960306,  69.489021,  70.017342,  18.750341,
            68.498350,  24.763120,  87.249370,  15.393150,  73.838730,  34.145330 ]
    a1 = [    0.12E-6,    0.34E-6,    0.94E-6,    2.46E-6,    6.08E-6,   14.14E-6,
             31.02E-6,   64.10E-6,  124.70E-6,  228.00E-6,  391.80E-6,  631.60E-6,
            953.50E-6,  548.90E-6, 1344.00E-6, 1763.00E-6, 2141.00E-6, 2386.00E-6,
           1457.00E-6, 2404.00E-6, 2112.00E-6, 2124.00E-6, 2461.00E-6, 2504.00E-6,
           2298.00E-6, 1933.00E-6, 1517.00E-6, 1503.00E-6, 1087.00E-6,  733.50E-6,
            463.50E-6,  274.80E-6,  153.00E-6,   80.09E-6,   39.46E-6,   18.32E-6,
              8.01E-6,    3.30E-6,    1.28E-6,    0.47E-6,    0.16E-6,  945.00E-6,
             67.90E-6,  638.00E-6,  235.00E-6,   99.60E-6,  671.00E-6,  180.00E-6 ]
    a2 = [ 11.830, 10.720,  9.690,  8.690,  7.740,  6.840,
            6.000,  5.220,  4.480,  3.810,  3.190,  2.620,
            2.115,  0.010,  1.655,  1.255,  0.910,  0.621,
            0.079,  0.386,  0.207,  0.207,  0.386,  0.621,
            0.910,  1.255,  0.078,  1.660,  2.110,  2.620,
            3.190,  3.810,  4.480,  5.220,  6.000,  6.840,
            7.740,  8.690,  9.690, 10.720, 11.830,  0.000,
            0.020,  0.011,  0.011,  0.089,  0.079,  0.079 ]
    a3 = [  8.40E-3,  8.50E-3,  8.60E-3,  8.70E-3,  8.90E-3,  9.20E-3,
            9.40E-3,  9.70E-3, 10.00E-3, 10.20E-3, 10.50E-3, 10.79E-3,
           11.10E-3, 16.46E-3, 11.44E-3, 11.81E-3, 12.21E-3, 12.66E-3,
           14.49E-3, 13.19E-3, 13.60E-3, 13.82E-3, 12.97E-3, 12.48E-3,
           12.07E-3, 11.71E-3, 14.68E-3, 11.39E-3, 11.08E-3, 10.78E-3,
           10.50E-3, 10.20E-3, 10.00E-3,  9.70E-3,  9.40E-3,  9.20E-3,
            8.90E-3,  8.70E-3,  8.60E-3,  8.50E-3,  8.40E-3, 15.92E-3,
           19.20E-3, 19.16E-3, 19.20E-3, 18.10E-3, 18.10E-3, 18.10E-3 ]
    a4 = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.6, 0.6, 0.6, 0.6, 0.6, 0.6 ]
    a5 = [  5.60E-3,  5.60E-3,  5.60E-3,  5.50E-3,  5.60E-3,  5.50E-3,
            5.70E-3,  5.30E-3,  5.40E-3,  4.80E-3,  4.80E-3,  4.17E-3,
            3.75E-3,  7.74E-3,  2.97E-3,  2.12E-3,  0.94E-3, -0.55E-3,
            5.97E-3, -2.44E-3,  3.44E-3, -4.13E-3,  1.32E-3, -0.36E-3,
           -1.59E-3, -2.66E-3, -4.77E-3, -3.34E-3, -4.17E-3, -4.48E-3,
           -5.10E-3, -5.10E-3, -5.70E-3, -5.50E-3, -5.90E-3, -5.60E-3,
           -5.80E-3, -5.70E-3, -5.60E-3, -5.60E-3, -5.60E-3, -0.44E-3,
            0.00E00,  0.00E00,  0.00E00,  0.00E00,  0.00E00,  0.00E00 ]
    a6 = [ 1.7, 1.7, 1.7, 1.7, 1.8, 1.8,
           1.8, 1.9, 1.8, 2.0, 1.9, 2.1,
           2.1, 0.9, 2.3, 2.5, 3.7, -3.1,
           0.8, 0.1, 0.5, 0.7, -1.0, 5.8,
           2.9, 2.3, 0.9, 2.2, 2.0, 2.0,
           1.8, 1.9, 1.8, 1.8, 1.7, 1.8,
           1.7, 1.7, 1.7, 1.7, 1.7, 0.9,
           1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]

    # Convert to the units of Liebe (kPa, GHz, inverse temperature ratio).
    theta = 300.0 / T
    e = 0.001 * Pvap
    p = 0.001 * Pdry
    f = nu * 1e-9

    # Non-resonant (Debye) and continuum contributions.
    ap = 1.4e-10 * (1.0 - 1.2e-5 * f ** 1.5)
    gamma0 = 5.6e-3 * (p + 1.1 * e) * theta ** 0.8
    nr = 2.588 * p * theta + 3.07e-4 * (1.0 / (1.0 + (f / gamma0) ** 2) - 1.0) * p * theta * theta
    ni = (2.0 * 3.07e-4 / (gamma0 * (1.0 + (f / gamma0) ** 2) * (1.0 + (f / 60.0) ** 2)) +
          ap * p * theta ** 2.5) * f * p * theta * theta

    # Sum the contributions of the oxygen lines (van Vleck-Weisskopf shape
    # with overlap correction delta).
    for i in range(len(nu0)):
        S = a1[i] * p * theta ** 3 * math.exp(a2[i] * (1.0 - theta))
        gamma = a3[i] * (p * theta ** (0.8 - a4[i])) + 1.1 * e * theta
        delta = a5[i] * p * theta ** a6[i]
        x = (nu0[i] - f) * (nu0[i] - f) + gamma * gamma
        y = (nu0[i] + f) * (nu0[i] + f) + gamma * gamma
        z = (nu0[i] + gamma * gamma / nu0[i])
        nr = nr + S * ((z - f) / x + (z + f) / y - 2 / nu0[i] + delta *
                       (1.0 / x - 1.0 / y) * gamma * f / nu0[i])
        ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / nu0[i] - delta *
                       ((nu0[i] - f) / x + (nu0[i] + f) / y) * f / nu0[i])

    # Return the result.
    return complex(nr, ni)
def refvap(nu, T, Pdry, Pvap):
    """Complex refractivity of the water vapour monomers (Liebe 1985).

    Ported from Miriad.

    Inputs:
      nu   = observing frequency (Hz)
      T    = temperature (K)
      Pdry = partial pressure of dry components (Pa)
      Pvap = partial pressure of water vapour (Pa)

    Returns:
      complex refractivity (ppm); the real part contributes to the path
      delay, the imaginary part to the absorption.
    """
    # Table of the microwave water lines: centre frequency (GHz) and the
    # Liebe strength/width coefficients b1..b3 for each line.
    mnu0 = [ 22.235080, 67.813960, 119.995940, 183.310117, 321.225644, 325.152919,
             336.187000, 380.197372, 390.134508, 437.346667, 439.150812, 443.018295,
             448.001075, 470.888947, 474.689127, 488.491133, 503.568532, 504.482692,
             556.936002, 620.700807, 658.006500, 752.033227, 841.073593, 859.865000,
             899.407000, 902.555000, 906.205524, 916.171582, 970.315022, 987.926764 ]
    b1 = [ 0.1090, 0.0011, 0.0007, 2.3000, 0.0464, 1.5400,
           0.0010, 11.9000, 0.0044, 0.0637, 0.9210, 0.1940,
           10.6000, 0.3300, 1.2800, 0.2530, 0.0374, 0.0125,
           510.0000, 5.0900, 0.2740, 250.0000, 0.0130, 0.1330,
           0.0550, 0.0380, 0.1830, 8.5600, 9.1600, 138.000 ]
    b2 = [ 2.143, 8.730, 8.347, 0.653, 6.156, 1.515,
           9.802, 1.018, 7.318, 5.015, 3.561, 5.015,
           1.370, 3.561, 2.342, 2.814, 6.693, 6.693,
           0.114, 2.150, 7.767, 0.336, 8.113, 7.989,
           7.845, 8.360, 5.039, 1.369, 1.842, 0.178 ]
    b3 = [ 27.84E-3, 27.60E-3, 27.00E-3, 28.35E-3, 21.40E-3, 27.00E-3,
           26.50E-3, 27.60E-3, 19.00E-3, 13.70E-3, 16.40E-3, 14.40E-3,
           23.80E-3, 18.20E-3, 19.80E-3, 24.90E-3, 11.50E-3, 11.90E-3,
           30.00E-3, 22.30E-3, 30.00E-3, 28.60E-3, 14.10E-3, 28.60E-3,
           28.60E-3, 26.40E-3, 23.40E-3, 25.30E-3, 24.00E-3, 28.60E-3 ]
    # Convert to the units of Liebe: kPa partial pressures, GHz frequency,
    # and inverse reduced temperature theta = 300 / T.
    theta = 300.0 / T
    e = 0.001 * Pvap
    p = 0.001 * Pdry
    f = nu * 1e-9
    # Continuum contribution (scales with the vapour pressure e).
    nr = 2.39 * e * theta + 41.6 * e * theta * theta + 6.47e-6 * f ** 2.05 * e * theta ** 2.4
    ni = (0.915 * 1.40e-6 * p + 5.41e-5 * e * theta * theta * theta) * f * e * theta ** 2.5
    # Sum the contributions of the lines.
    # range() replaces the Python-2-only xrange(); behaviour is identical.
    for i in range(len(mnu0)):
        S = b1[i] * e * theta ** 3.5 * math.exp(b2[i] * (1.0 - theta))
        gamma = b3[i] * (p * theta ** 0.8 + 4.80 * e * theta)
        x = (mnu0[i] - f) * (mnu0[i] - f) + gamma * gamma
        y = (mnu0[i] + f) * (mnu0[i] + f) + gamma * gamma
        z = (mnu0[i] + gamma * gamma / mnu0[i])
        nr = nr + S * ((z - f) / x + (z + f) / y - 2 / mnu0[i])
        ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / mnu0[i])
    # Return the result.
    return complex(nr, ni)
def pvapsat(T):
    """Saturation pressure of water vapour (ported from Miriad).

    Input:
      T = temperature (K)

    Output:
      vapour saturation pressure (Pa); 0.0 for temperatures of 215 K
      and below.
    """
    if T <= 215:
        return 0.0
    theta = 300.0 / T
    denom = 41.51 * theta ** -5 * 10 ** (9.384 * theta - 10.0)
    return 1e5 / denom
def refract(t, pdry, pvap, z, n, nu, T0, el):
    # From Miriad; Compute refractive index for an atmosphere.
    # Determine the sky brightness and excess path lengths for a parallel
    # slab atmosphere. Liebe's model (1985) is used to determine the complex
    # refractive index of air.
    #
    # Input:
    #   n = the number of atmospheric layers.
    #   t = temperature of the layers. T[0] is the temperature at the lowest
    #       layer (K)
    #   Pdry = partial pressure of the dry components (Pa)
    #   Pvap = partial pressure of the water vapour components (Pa)
    #   z = height of the layer.
    #   nu = frequency of interest (Hz)
    #   T0 = astronomical brightness temperature (K)
    #   el = elevation angle of the source above the atmosphere (rad)
    #
    # Output:
    #   { 'Tb' = brightness temperature (K),
    #     'tau' = opacity (nepers)
    #     'Ldry' = excess path, dry component (m)
    #     'Lvap' = excess path, water vapour component (m) }
    # Some constants.
    HMKS = 6.6260755e-34 # Planck constant, J.s
    KMKS = 1.380658e-23 # Boltzmann constant, J/K
    CMKS = 299792458 # Speed of light, m/s
    tau = 0.0
    # Seed Tb with the astronomical background converted to a radiation
    # (Planck) temperature at this frequency.
    Tb = HMKS * nu / (KMKS * (math.exp(HMKS * nu / (KMKS * T0)) - 1))
    Ldry = 0.0
    Lvap = 0.0
    snell = math.sin(el)
    # Walk the slabs from the top of the atmosphere (i == n) down to i == 1.
    # NOTE(review): layer index 0 is never visited, and the i == 1 case is
    # tested before i == n (they coincide when n == 1) -- presumably matching
    # the Miriad original; confirm before changing.
    for i in xrange(n, 0, -1):
        if (i == 1):
            dz = 0.5 * (z[1] - z[0])
        elif (i == n):
            dz = 0.5 * (z[n] - z[n - 1])
        else:
            # Interior slab: half the spacing between its two neighbours.
            dz = 0.5 * (z[i + 1] - z[i - 1])
        Ndry = refdry(nu, t[i], pdry[i], pvap[i])
        Nvap = refvap(nu, t[i], pdry[i], pvap[i])
        # Refractivities are in ppm; build the slab's refractive index.
        nr = 1 + (Ndry.real + Nvap.real) * 1e-6
        ni = (Ndry.imag + Nvap.imag) * 1e-6
        # Slant path through the slab (plane-parallel Snell's law).
        l = dz * nr / math.sqrt(nr * nr + (snell * snell) - 1.0)
        dtau = l * 4.0 * math.pi * nu / CMKS * ni
        # Radiative transfer: attenuate what came from above, add the slab's
        # own emission, and accumulate opacity and excess paths.
        Tb = (Tb - t[i]) * math.exp(-dtau) + t[i]
        tau = tau + dtau
        Ldry = Ldry + l * Ndry.real * 1e-6
        Lvap = Lvap + l * Nvap.real * 1e-6
    return { 'Tb': Tb, 'tau': tau, 'Ldry': Ldry, 'Lvap': Lvap }
def calcOpacity(freq, el, t0, p0, h0):
    # From Miriad; Compute sky brightness and opacity of a model atmosphere.
    # Returns the transmissivity of the atmosphere given frequency, elevation
    # angle and meteorological data. This uses a simple model of the atmosphere
    # and Liebe's model (1985) of the complex refractive index of air.
    # Input:
    #   freq = frequency (Hz)
    #   el = elevation angle (radians)
    #   t0,p0,h0 = Met data; observatory temperature, pressure and humidity
    #              (K, Pa, fraction)
    # Output:
    #   { 'fac' = transmissivity (fraction between 0 and 1)
    #     'Tb' = sky brightness temperature (K) }
    # Atmospheric parameters.
    M = 28.96e-3   # molar mass of dry air (kg/mol)
    R = 8.314      # universal gas constant (J/mol/K)
    Mv = 18e-3     # NOTE(review): Mv and rho0 are defined but unused below.
    rho0 = 1e3
    g = 9.81       # gravitational acceleration (m/s^2)
    # d = temperature lapse rate (K/m)
    d = 0.0065
    # z0 = water vapour scale height (m)
    z0 = 1540.0
    # zmax = max altitude of model atmosphere (m)
    zmax = 10000.0
    # Generate a model of the atmosphere - T is temperature, Pdry is
    # partial pressure of "dry" constituents, Pvap is the partial
    # pressure of the water vapour.
    N = 50 # Number of iterations through the atmosphere in height
    z = []
    T = []
    Pvap = []
    Pdry = []
    fac = []
    Tb = []
    tau = []
    ofreq = []
    for i in xrange(0, (N + 1)):
        zd = float(i) * zmax / float(N)
        z.append(zd)
        # Temperature profile: t0 / (1 + d*zd/t0) ~ t0 - d*zd to first order.
        # NOTE(review): this differs from the plain linear lapse t0 - d*zd;
        # presumably deliberate (from Miriad) -- confirm before changing.
        T.append(t0 / (1 + d / t0 * zd))
        # Hydrostatic pressure with a lapse correction.
        P = p0 * math.exp(-1.0 * M * g / (R * t0) * (zd + 0.5 * d * zd * zd / t0))
        # Water vapour: exponential decay with scale height z0, capped at
        # the local saturation pressure.
        Pvap.append(min(h0 * math.exp(-1.0 * zd / z0) * pvapsat(T[0]),
          pvapsat(T[i])))
        Pdry.append(P - Pvap[i])
    # Determine the transmissivity and sky brightness.
    #job_server = pp.Server(secret="hello")
    #jobs = [(inpf, job_server.submit(refract, (T, Pdry, Pvap, z, N, inpf, 2.7, el),
    #                                 (refdry, refvap, pvapsat), ("math",))) for inpf in freq]
    for i in xrange(0, len(freq)):
        #print "calculating atmosphere for frequency %.1f" % freq[i]
        # 2.7 K: cosmic microwave background as the input brightness.
        resref = refract(T, Pdry, Pvap, z, N, freq[i], 2.7, el)
        fac.append(math.exp(-1.0 * resref['tau']))
        Tb.append(resref['Tb'])
        tau.append(resref['tau'])
        ofreq.append(freq[i])
    #for inpf, job in jobs:
    #    resref = job()
    #    fac.append(math.exp(-1.0 * resref['tau']))
    #    Tb.append(resref['Tb'])
    #    tau.append(resref['tau'])
    #    ofreq.append(inpf)
    return { 'fac': fac, 'Tb': Tb, 'tau': tau, 'freq': ofreq }
|
14,353 | 379fff24b1f249d1c716537cb70fb3b6769b6a46 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
# Load the car data set; '?' is the file's missing-value marker.
auto_data = pd.read_csv("Detail_Cars.csv")
auto_data = auto_data.replace('?', np.nan)
# NOTE(review): col_object is computed but never used below.
col_object = auto_data.select_dtypes(include=['object'])
# Columns read in as strings that are really numeric.
auto_data['price'] = pd.to_numeric(auto_data['price'], errors = 'coerce')
auto_data['bore'] = pd.to_numeric(auto_data['bore'], errors = 'coerce')
auto_data['stroke'] = pd.to_numeric(auto_data['stroke'], errors = 'coerce')
auto_data['horsepower'] = pd.to_numeric(auto_data['horsepower'], errors = 'coerce')
auto_data['peak-rpm'] = pd.to_numeric(auto_data['peak-rpm'], errors = 'coerce')
auto_data = auto_data.drop("normalized-losses", axis = 1)
# Map the spelled-out cylinder counts to integers.
cylin_map= {'two':2, 'three':3, 'four':4, 'five':5, 'six':6, 'eight':8, 'twelve':12}
auto_data['num-of-cylinders'].replace(cylin_map, inplace=True)
# One-hot encode the remaining categoricals, then drop incomplete rows.
auto_data = pd.get_dummies(auto_data, drop_first=True)
auto_data = auto_data.dropna()
# Predict price from all remaining features.
X = auto_data.drop('price', axis = 1)
y = auto_data['price']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.3, random_state=0)
# Baseline: ordinary least squares.
from sklearn.linear_model import LinearRegression
li_model = LinearRegression()
li_model.fit(X_train, y_train)
print("Training Set:", li_model.score(X_train, y_train))
print("Test Set:",li_model.score(X_test, y_test))
# Lasso (L1): shrinks many coefficients to exactly zero.
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=5, normalize = True)
lasso.fit(X_train, y_train)
print("Training Set:", lasso.score(X_train, y_train))
print("Test Set:",lasso.score(X_test, y_test))
predictors = X_train.columns
coef = pd.Series(lasso.coef_, predictors).sort_values()
print(coef)
coef.plot(kind='bar')
# Ridge (L2): shrinks coefficients without zeroing them.
from sklearn.linear_model import Ridge
ridge = Ridge(alpha = 2, normalize = True)
ridge.fit(X_train, y_train)
print(ridge.score(X_train, y_train))
print(ridge.score(X_test, y_test))
coef_ridge = pd.Series(ridge.coef_, predictors).sort_values()
print(coef_ridge)
coef_ridge.plot(kind='bar', title='Ridge Regression')
|
14,354 | 8287c5097867389aabe804478ba26810c0561d90 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# info.py
#
# This file is part of the RoboEarth Cloud Engine test.
#
# This file was originally created for RoboEearth
# http://www.roboearth.org/
#
# The research leading to these results has received funding from
# the European Union Seventh Framework Programme FP7/2007-2013 under
# grant agreement no248942 RoboEarth.
#
# Copyright 2012 RoboEarth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# \author/s: Dominique Hunziker
#
#
import sys
import roslib; roslib.load_manifest('barcode')
import rospy
import sensor_msgs.msg
from barcode.msg import Barcode, DBInfo
def printInfo(msg):
    """Callback for the ``barcodeDBInfo`` topic.

    Writes the received message to stdout, exactly as ``print`` would.
    """
    sys.stdout.write('{0}\n'.format(msg))
def main():
    """Publish a single GTIN query and print every DBInfo reply until shutdown."""
    if len(sys.argv) != 3 :
        print('Usage: webDB.py [GTIN] [type of GTIN]')
        return 0
    gtin = sys.argv[1]
    gtinType = sys.argv[2]
    rospy.init_node('debugBarcodeInfoNode')
    # Every DBInfo message received is dumped to stdout by printInfo.
    rospy.Subscriber('barcodeDBInfo', DBInfo, printInfo)
    # latch=True so the one-shot query is still delivered to subscribers
    # that connect after publish() has been called.
    pub = rospy.Publisher('barcodeWebDBQuery', Barcode, latch=True)
    pub.publish(gtin, gtinType)
    rospy.spin()
if __name__ == '__main__':
main()
|
14,355 | eb88e2a36634b446b1a569c380522b8eb1805233 | #!/usr/bin/python
import reader
import analyzer
# Parse the input via reader.parser() and feed the result to the analyzer.
analyzer.analyze(reader.parser())
14,356 | 9f0ed48bc73d2480a55877fc286e257a80f495e3 | """Ejercicio 38.- Llenar una tabla de 10 posiciones con números enteros comprendidos entre el 1 y el
99. Ordenar dicha tabla de menor a mayor y visualizarla por pantalla de la forma
siguiente: """
from tabulate import tabulate
import random
# Fill the table with 10 random integers in [1, 99].
numero = []
for i in range(10):
    # randint is inclusive on both ends, so this really draws 1..99.
    # The original randrange(1, 99) excluded 99, contradicting the spec.
    n = random.randint(1, 99)
    numero.append(n)
numero_ordenado = sorted(numero)
print()
# Show the original and the ascending-sorted table side by side.
tabla = { "Tabla_Inicial":numero, "Tabla_Ordenada":numero_ordenado}
print(tabulate(tabla, headers = 'keys', tablefmt ='fancy_grid', numalign = 'center'))
14,357 | cb556e1731546dd151c2d62e771b976f3151e4da | import os
import re
import codecs
import hashlib
import hmac
import random
import string
import webapp2
import jinja2
from users import *
from google.appengine.ext import ndb
def portfolio_key(name = 'default'):
    """Return the ndb ancestor key that groups portfolio entities."""
    return ndb.Key('portfolio', name)
class Project(ndb.Model):
    """A portfolio project entry: title, description, creation time and link."""
    title = ndb.StringProperty(required = True)
    description = ndb.TextProperty(required = True)
    created = ndb.DateTimeProperty(auto_now_add = True)
    # TODO: Find way to include an image
    link = ndb.StringProperty(required = False)
|
14,358 | 76760ec69dc470198a4bfd15e380541e01e44b77 | import sys
import pygame
from letter import Letter
def letter_generator(stats, az_settings, screen, letters):
    """Spawn a new falling letter, or end the game when no lives remain."""
    if stats.lives_left > 0:
        new_letter = Letter(az_settings, screen)
        letters.add(new_letter)
    else:
        # Out of lives: clear remaining letters, stop play, show the cursor.
        letters.empty()
        stats.game_active = False
        pygame.mouse.set_visible(True)
def check_events(az_settings, letters, stats, sb, play_button, screen):
    """Poll and dispatch pygame events: quit, mouse clicks and key presses."""
    screen_rect = screen.get_rect()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(az_settings, stats, play_button, mouse_x, mouse_y, sb)
        elif event.type == pygame.KEYDOWN:
            # Each key press is compared against every letter still falling.
            # NOTE(review): a single press can score a hit on one letter AND
            # be penalised once for each other on-screen letter; also the
            # group is mutated (remove) while being iterated -- confirm both
            # are intended.
            for ltr in letters:
                if ltr.ascii == event.key and ltr.rect.bottom < screen_rect.bottom:
                    s1 = pygame.mixer.Sound("hit.wav")
                    s1.play()
                    stats.score += az_settings.letter_points
                    sb.prep_score()
                    check_high_score(stats, sb)
                    letters.remove(ltr)
                elif ltr.ascii != event.key and ltr.rect.bottom < screen_rect.bottom and stats.score > 0:
                    # Wrong key: deduct points, but never while score is 0.
                    s2 = pygame.mixer.Sound("wrong_hit.wav")
                    s2.play()
                    stats.score -= az_settings.letter_points
                    sb.prep_score()
def check_play_button(az_settings, stats, play_button, mouse_x, mouse_y, sb):
    """Start a fresh game when Play is clicked while no game is running."""
    button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    if button_clicked and not stats.game_active:
        # Reset score/lives and dynamic difficulty, hide the cursor and
        # loop the background music for the new game.
        stats.reset_stats()
        sb.prep_score()
        az_settings.initialize_dynamic_settings()
        pygame.mouse.set_visible(False)
        stats.game_active = True
        pygame.mixer.music.load("bg_music.mp3")
        pygame.mixer.music.play(-1)
def letter_fallen(stats):
    """Take one life away, but only while the player still has lives left."""
    stats.lives_left = stats.lives_left - 1 if stats.lives_left > 0 else stats.lives_left
def check_letter_bottom(screen, letters, stats):
    """Detect letters that fell past the bottom edge and charge a life."""
    screen_rect = screen.get_rect()
    for ltr in letters.sprites():
        if ltr.rect.bottom > screen_rect.bottom:
            s3 = pygame.mixer.Sound("oops.wav")
            s3.play()
            # Clamp the letter to the bottom edge.
            # NOTE(review): the letter is clamped but not removed from the
            # group, so if it keeps moving it can trigger this branch (sound
            # + life loss) on later frames too -- verify intended.
            ltr.rect.bottom = screen_rect.bottom
            letter_fallen(stats)
def update_screen(az_settings, stats, screen, letters, sb, play_button):
    """Advance the letters, draw everything and flip the display buffer."""
    check_letter_bottom(screen, letters, stats)
    letters.draw(screen)
    letters.update()
    sb.show_score()
    if not stats.game_active:
        play_button.draw_button()
        # NOTE(review): increase_speed() runs every frame while the game is
        # NOT active (i.e. on the menu screen) -- that looks inverted;
        # confirm whether it should run during play instead.
        az_settings.increase_speed()
    pygame.display.flip()
def check_high_score(stats, sb):
    """Persist and display a new high score when the current score beats it."""
    if stats.score > stats.high_score:
        stats.high_score = stats.score
        # NOTE(review): this rewrites high_score.txt on every hit that beats
        # the record; consider writing once at game end if I/O matters.
        with open('high_score.txt', 'w') as file_object:
            file_object.write(str(stats.high_score))
        sb.prep_high_score()
|
14,359 | 785187f00dcef4e2c340e87f3448bc9914ecd616 | from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class SnickerdoodlesPlugin(WillPlugin):
    """Replies with the cookies template whenever someone mentions cookies."""

    @hear("cookies", include_me=False)
    def will_likes_cookies(self, message):
        """Render cookies.html and say it (as HTML) in reply to the message."""
        self.say(rendered_template("cookies.html", {}), message=message, html=True, )
|
14,360 | 740f73a79f4ceaff73164b75eaed37fee0b44617 | #!/usr/bin/python
import sys
import string
# Hadoop-streaming mapper: emit "payment_type,1" when the tip is positive,
# "payment_type,0" otherwise (Python 2 -- note the print statement).
#map payment_type, count(1) for each positive tip
for line in sys.stdin:
    try:
        items = line.strip().split(",")
        # Skip the CSV header row.
        if items[0] == 'medallion':
            continue
        # Key: payment type (column 5), upper-cased for consistent grouping.
        key = items[4].upper()
        # Value: 1 if the tip amount (column 9) is positive, else 0.
        if float(items[8]) > 0:
            values = 1
        else:
            values = 0
        print '%s,%s' %(key, values)
    except:
        # Best-effort: malformed rows are silently dropped.
        # NOTE(review): the bare except also hides real bugs; consider
        # narrowing to (IndexError, ValueError).
        pass
|
14,361 | 13395e5d55d7496a98b59374469a9eaa6d95c728 | from clients.models import Client
from addresses.models import Address
from mediguest_admin.site import mediguest_admin_site
from people.models import Person
from django.contrib import admin
from foreignkeysearch.widgets import ForeignKeySearchForm
from foreignkeysearch.handler import BaseHandler
from keywork.mod.keyworker import Keyworker
from agencyservices.models import GP
from convictions.admin import ConvictionsInline
from booking.admin import BookingsInline
from benefits.admin import ReceivedBenefitsInline
from drug_db.admin import DrugDosesInline
from keywork.admin import KeyworkSessionsInline
from supportplans.admin import SupportPlanReviewsInline
from incidents.admin import IncidentsInline
from risks.admin import RiskAssessmentsInline
from clientnotes.admin import ClientNotesInline
from hospitaladmissions.admin import HospitalAdmissionsInline
from agencycontact.admin import AgencyContactInline
from servicecharges.admin import ServiceChargesInline, ServiceChargePaymentsInline
from forms import ClientForm
class address_SearchHandler(BaseHandler):
    """Search handler for Address foreign keys; country is not searchable."""
    model = Address
    exclude = (
        'country',
    )
class next_of_kin_SearchHandler(BaseHandler):
    """Search handler for Person (next of kin) foreign keys."""
    model = Person
    exclude = (
        'gender',
        'address',
    )
class keyworker_SearchHandler(BaseHandler):
    """Search handler for Keyworker foreign keys."""
    model = Keyworker
    exclude = (
        'gender',
        'address',
    )
class gp_SearchHandler(BaseHandler):
    """Search handler for GP foreign keys."""
    model = GP
    exclude = (
        'gender',
        'address',
    )
class ClientAdmin(admin.ModelAdmin):
    """Admin for Client: inlines for related records plus search widgets
    on the foreign-key fields that have a *_SearchHandler defined."""
    form = ClientForm
    inlines = [
        ConvictionsInline,
        BookingsInline,
        ReceivedBenefitsInline,
        DrugDosesInline,
        KeyworkSessionsInline,
        SupportPlanReviewsInline,
        RiskAssessmentsInline,
        IncidentsInline,
        HospitalAdmissionsInline,
        AgencyContactInline,
        ServiceChargesInline,
        ServiceChargePaymentsInline,
        ClientNotesInline,
    ]
    fieldsets = (
        ('General', {
            'fields': (
                'photo',
                'gender',
                'title',
                'forenames',
                'surnames',
                'suffix',
                'nee',
                'date_of_birth',
                'religion',
                'client_pack_agreed',
            )
        }),
        ('Contact Details', {
            'fields': (
                'address',
                'mobile_phone_no',
            )
        }),
        ('Medical Information', {
            'fields': (
                'national_insurance_no',
                'gp',
                'assigned_keyworker',
                'contact_next_of_kin',
                'next_of_kin',
                'medical_problems',
                'mobility_problems',
                'assistance_required',
            )
        }),
        ('Statistics', {
            'fields': (
                'presented_as_homeless',
                'ethnic_origin',
            )
        }),
    )

    def formfield_for_dbfield(self, db_field, **kwargs):
        """Attach a ForeignKeySearchForm widget to the searchable FK fields."""
        field = super(ClientAdmin, self).formfield_for_dbfield(db_field, **kwargs)
        # Explicit mapping instead of eval() on the field name: safer, and
        # the searchable fields become greppable.
        handlers = {
            'address': address_SearchHandler,
            'keyworker': keyworker_SearchHandler,
            'gp': gp_SearchHandler,
            'next_of_kin': next_of_kin_SearchHandler,
        }
        handler = handlers.get(db_field.name)
        if handler is not None:
            field.widget = ForeignKeySearchForm(
                db_field=db_field,
                handler=handler,
            )
        return field
mediguest_admin_site.register(Client, ClientAdmin)
|
14,362 | 847a648d37f04d83708da7c6b06852153c214d31 | l = []
s = input("Enter the string: ")
k = int(input("enter the length of substring"))
l = [s[i:i + k] for i in range(0, len(s), k)]
print(l)
for i in l:
for j in range(len(i)):
res = [i[:-1] for i in l if l[i][-1] == ' ']
else:
continue
print(res)
|
14,363 | 9ac94e2c9c664971434f7cf80c1faaebd13cbb3b | # -*- coding: utf-8 -*-
# Scrapy settings for realtySpiders project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Core project identity: the bot name and where Scrapy discovers spiders.
BOT_NAME = 'realtySpiders'
# DUPEFILTER_CLASS = 'scrapy.dupefilter.BaseDupeFilter'
SPIDER_MODULES = ['realtySpiders.spiders']
NEWSPIDER_MODULE = 'realtySpiders.spiders'
# LOG_ENABLED = False
FEED_EXPORT_FIELDS = ['BuildType', 'BuilderName', 'State', 'Region', 'DesignName',
'BuildFinishRange', 'BasePrice', 'Squares', 'HouseWidth',
'HouseLength', 'Lot_BlockWidth', 'LandSize', 'SturturalWarranty',
'EnergyRating', 'Storey', 'Bedrooms', 'Bathrooms', 'Garage', 'LivingArea', 'TheatreRoom_Yes_No',
'SeparateMeals_Yes_No', 'Alfresco_Yes_No', 'Study_Yes_No', 'WalkinPantry_Yes_No',
'BultersPantry_Yes_No', 'Void_Yes_No', 'His_HerWIR_Yes_No', 'BedroomGrFloor_Yes_No',
'SteelStructure_Yes_No', 'Balcony_Yes_No', 'LoungeDimension', 'FamilyDimension',
'Meals_DiningDimension', 'TheatreDimension', 'KitchenDimension', 'StudyDimension',
'AlfrescoDimension', 'GarageDimension', 'MasterBedroomDimension', 'Bedroom2Dimension',
'Bedroom3Dimension', 'Bedroom4Dimension', 'KitchenAppliance', 'KitchenAppliance1',
'KitchenAppliance2', 'KitchenAppliance3', 'ApplianceBrand', 'KitchenBenchtop', 'Splashback',
'Windows', 'FloorCovering', 'FloorCovering1', 'FloorCovering2', 'Cooling', 'CeilingHeight',
'Bath', 'EnsuiteWallTiling', 'EnsuiteBenchtop', 'EnsuiteShowerbase', 'WallPaint', 'WIRFitouts',
'SecuritySystem', 'Downlights', 'Landscaping', 'Driveway', 'Promotion', 'OtherInclusions',
'OtherInclusions1', 'OtherInclusions2', 'OtherInclusions3', 'OtherInclusions4',
'OtherInclusions5', 'BuilderEmailAddress', 'DisplayLocation', 'Lot_BlockAddress',
'HomeDesignMainImage', 'FloorPlanImage1', 'FloorPlanImage2', 'BrochureImage_pdf',
'InclusionsImage_pdf', 'Image1', 'Image2', 'Image3', 'Image4', 'Image5', 'Image6', 'Image7',
'Image8', 'Image9', 'Image10', 'Image11', 'Image12', 'Image13', 'Image14', 'Image15',
'BuilderLogo', 'url']
# FEED_STORE_EMPTY = True
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'realtySpiders (+http://www.yourdomain.com)'
# FEED_FORMAT = 'csv'
# FEED_URI = 'Result.csv'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'realtySpiders.middlewares.RealtyspidersSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'realtySpiders.middlewares.MyCustomDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Route all scraped items through the project pipeline (priority 1) and
# export CSV through the header-suppressing exporter defined in pipelines.
ITEM_PIPELINES = {
    'realtySpiders.pipelines.RealtyspidersPipeline': 1,
}
FEED_EXPORTERS = {
    'csv': 'realtySpiders.pipelines.HeadlessCsvItemExporter'
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
14,364 | 01c41cc89b1190ae24b6ee02903c4c5d39f58169 | # Generated by Django 2.1.4 on 2019-01-02 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``is_main`` flag (default False) to the News model."""

    dependencies = [
        ("stock_maintain", "0005_newsimage_image_type"),
    ]

    operations = [
        migrations.AddField(
            model_name="news",
            name="is_main",
            field=models.BooleanField(default=False),
        ),
    ]
|
14,365 | e8e3eb13e5c948d84e6e31502b0e21ae1e87ab0c | import argparse
import logging
import os
import os.path as osp
import re
import sys
from karabo_data import RunDirectory
from karabo_data.components import AGIPD1M, LPD1M
from karabo_data.exceptions import SourceNameError
log = logging.getLogger(__name__)
def _get_detector(data, min_modules):
    """Return the first supported detector found in *data*.

    Tries AGIPD1M then LPD1M; a detector whose sources are absent raises
    SourceNameError and is skipped. Returns None when neither is present.
    """
    for cls in (AGIPD1M, LPD1M):
        try:
            return cls(data, min_modules=min_modules)
        except SourceNameError:
            continue
def main(argv=None):
    """CLI entry point: write a virtual CXI file for an EuXFEL run's detector."""
    ap = argparse.ArgumentParser('karabo-data-make-virtual-cxi')
    ap.add_argument('run_dir', help="Path to an EuXFEL run directory")
    # Specifying a proposal directory & a run number is the older interface.
    # If the run_number argument is passed, run_dir is used as proposal.
    ap.add_argument('run_number', nargs="?", help=argparse.SUPPRESS)
    ap.add_argument(
        '-o', '--output',
        help="Filename or path for the CXI output file. "
             "By default, it is written in the proposal's scratch directory."
    )
    ap.add_argument(
        '--min-modules', type=int, default=9, metavar='N',
        help="Include trains where at least N modules have data (default 9)"
    )
    args = ap.parse_args(argv)
    out_file = args.output
    logging.basicConfig(level=logging.INFO)
    if args.run_number:
        # proposal directory, run number
        run = 'r%04d' % int(args.run_number)
        proposal = args.run_dir
        run_dir = osp.join(args.run_dir, 'proc', run)
        if out_file is None:
            out_file = osp.join(proposal, 'scratch', '{}_detectors_virt.cxi'.format(run))
    else:
        # run directory
        run_dir = os.path.abspath(args.run_dir)
        if out_file is None:
            # Infer the proposal root and run id from a .../{raw|proc}/rNNNN
            # path; without that layout we cannot pick a default output path.
            m = re.search(r'/(raw|proc)/(r\d{4})/?$', run_dir)
            if not m:
                sys.exit("ERROR: '-o outfile' option needed when "
                         "input directory doesn't look like .../proc/r0123")
            proposal = run_dir[:m.start()]
            fname = '{}_{}_detectors_virt.cxi'.format(*m.group(2, 1))
            out_file = osp.join(proposal, 'scratch', fname)
    out_dir = osp.dirname(osp.abspath(out_file))
    # Fail early with clear messages if we cannot read the run or write
    # the output location.
    if not os.access(run_dir, os.R_OK):
        sys.exit("ERROR: Don't have read access to {}".format(run_dir))
    if not os.access(out_dir, os.W_OK):
        sys.exit("ERROR: Don't have write access to {}".format(out_dir))
    log.info("Reading run directory %s", run_dir)
    run = RunDirectory(run_dir)
    det = _get_detector(run, args.min_modules)
    if det is None:
        sys.exit("No AGIPD or LPD sources found in {!r}".format(run_dir))
    det.write_virtual_cxi(out_file)
|
14,366 | bfd6c65d778be337e3c1bdf3d0b59d1ba591de0a | from datetime import datetime, timedelta
def check_date(date):
    """Return True when *date* ('YYYY-MM-DD' or 'YYYYMMDD') lies in the future."""
    parsed = datetime.strptime(date.replace('-', ''), "%Y%m%d").date()
    return parsed > datetime.now().date()
|
14,367 | b1baf064e0ae8aca950113409272a4f779b41c17 | from django.db import models
from .managers import CustomUserManager
from django.contrib.auth.models import AbstractUser, BaseUserManager
from Accounts.profiles.models import PI_Profile, Grads_Profile, Undergrad_Profile
from .profiles.models import PI_Profile, Grads_Profile, Undergrad_Profile
# Create your models here.
class User(AbstractUser):
    """Custom user keyed on e-mail, with a role and an auto-created profile."""

    class UserType(models.TextChoices):
        PI = 'PI', ('Principal Investigator')
        Grads = 'Grads', ('Graduate Student')
        Undergrad = 'Undergrad', ('Undergrad')
    # NOTE(review): username is redeclared here WITHOUT unique=True, so it is
    # no longer unique; e-mail is the login identifier instead.
    username = models.CharField(max_length=50)
    email = models.EmailField(unique=True)
    type = models.CharField(max_length = 150, choices = UserType.choices, default = UserType.Undergrad)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username', 'type']
    # base_type = UserType.Undergrad
    objects = CustomUserManager()

    def save(self, *args, **kwargs):
        """Save the user; on the very first save, create the matching profile."""
        # pk is None only before the row exists, i.e. on creation.
        created = self.pk is None
        # if not self.pk:
        #     self.UserType = self.base_type
        super(User, self).save(*args, **kwargs)
        # Create exactly one role-specific profile for a newly created user.
        if created and self.type == 'PI':
            PI_Profile.objects.create(user = self)
        elif created and self.type == 'Grads':
            Grads_Profile.objects.create(user = self)
        elif created and self.type == 'Undergrad':
            Undergrad_Profile.objects.create(user = self)
class PI_Manager(BaseUserManager):
    """Manager limiting querysets to Principal Investigator users."""
    def get_queryset(self, *args, **kwargs):
        results = super().get_queryset(*args, **kwargs)
        return results.filter(type = User.UserType.PI)
class PI(User):
    """Proxy model exposing only Principal Investigator users."""
    base_type = User.UserType.PI
    objects = PI_Manager()
    class Meta:
        proxy = True
    # Functions only available to PIs
    def extra(self):
        # NOTE(review): assumes the reverse accessor for PI_Profile is named
        # 'PI_profile' -- confirm against the profile model's related_name.
        return self.PI_profile
class Grads_Manager(BaseUserManager):
    """Manager limiting querysets to graduate-student users."""
    def get_queryset(self, *args, **kwargs):
        # Fixed: this previously filtered on UserType.PI, so Grads.objects
        # returned principal investigators instead of graduate students.
        results = super().get_queryset(*args, **kwargs)
        return results.filter(type = User.UserType.Grads)
class Grads(User):
    """Proxy model exposing only graduate-student users."""
    base_type = User.UserType.Grads
    objects = Grads_Manager()
    class Meta:
        proxy = True
    # Functions only available to Grads
    def extra(self):
        # NOTE(review): assumes the reverse accessor for Grads_Profile is
        # named 'Grads_profile' -- confirm the related_name.
        return self.Grads_profile
class Undergrad_Manager(BaseUserManager):
    """Manager limiting querysets to undergraduate users."""
    def get_queryset(self, *args, **kwargs):
        results = super().get_queryset(*args, **kwargs)
        return results.filter(type = User.UserType.Undergrad)
class Undergrad(User):
    """Proxy model exposing only undergraduate users."""
    base_type = User.UserType.Undergrad
    objects = Undergrad_Manager()
    class Meta:
        proxy = True
    # Functions only available to Undergrads
    def extra(self):
        # NOTE(review): assumes the reverse accessor for Undergrad_Profile is
        # named 'Undergrad_profile' -- confirm the related_name.
        return self.Undergrad_profile
|
14,368 | ef7c2ba33e48044a15688d4eb6e27866f30ee7ec | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-07 03:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a database index to ``blog.route_name`` (db_index=True)."""

    dependencies = [
        ('blog', '0005_auto_20170404_0409'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blog',
            name='route_name',
            field=models.CharField(db_index=True, max_length=20),
        ),
    ]
|
14,369 | a32405de1b1fa08ca6d00794f5e012ebca0494f1 | #!/usr/bin/env python
"""
Global status of basinboa.
reference: http://code.google.com/p/bogboa/source/browse/trunk/mudlib/gvar.py
"""
#------------------------------------------------------------------------------
# ASCII
#------------------------------------------------------------------------------
ASCII_ART = '''
____ _ ____
| __ ) __ _ ___(_)_ __ | __ ) ___ __ _
| _ \ / _` / __| | '_ \| _ \ / _ \ / _` |
| |_) | (_| \__ \ | | | | |_) | (_) | (_| |
|____/ \__,_|___/_|_| |_|____/ \___/ \__,_|
Testing Server
'''
#------------------------------------------------------------------------------
# Server
#------------------------------------------------------------------------------
SERVER_CONFIG = None  #. populated at startup with the server configuration
SERVER_RUN = True     #. main-loop flag; set False to shut the server down
IDLE_TIMEOUT = 300    #. seconds before an idle connection is dropped
LANG = None           #. active language/translation table
#------------------------------------------------------------------------------
# World
#------------------------------------------------------------------------------
WORLD = None
CHARACTER_LOADER = None
MOB_LOADER = None
LOBBY = {} #. key is Client object, value is Guest object
_PLAYERS = {} #. key is Client object, value is Player object
PLAYERS = {} #. key is player name, value is Player object
COMMANDS = {} #. key is command name, value is command function
|
14,370 | 110a84e89140e914ce40e0bab8be205f477353cc |
import math
from panda3d.core import TransparencyAttrib, Texture, Vec2, NodePath
from panda3d.core import Mat4, CSYupRight, TransformState, CSZupRight, LVecBase2i
from direct.gui.OnscreenImage import OnscreenImage
from direct.gui.DirectGui import DirectFrame
from LightManager import LightManager
from RenderTarget import RenderTarget
from RenderTargetType import RenderTargetType
from DebugObject import DebugObject
from BetterShader import BetterShader
from Antialiasing import Antialiasing
class RenderingPipeline(DebugObject):
    def __init__(self, showbase):
        """Store references, create the light manager and build the pipeline."""
        DebugObject.__init__(self, "RenderingPipeline")
        self.showbase = showbase
        self.lightManager = LightManager()
        self.size = self._getSize()
        self.precomputeSize = Vec2(0)
        self.camera = base.cam
        self.cullBounds = None
        # Tile size for the lighting pass -- presumably consumed by
        # _createLightingPipeline (defined elsewhere); confirm.
        self.patchSize = Vec2(32, 32)
        # Temporal reprojection state (see the combiner shader inputs).
        self.temporalProjXOffs = 0
        self.temporalProjFactor = 2
        self.forwardScene = NodePath("Forward Rendering")
        self.lastMVP = None
        # Builds all buffers, shaders and tasks.
        self._setup()
    def _setup(self):
        """Create the deferred targets, lighting pipeline, combiner,
        antialiasing and final pass, and wire their shader inputs."""
        self.debug("Setting up render pipeline")
        # First, we need no transparency
        render.setAttrib(
            TransparencyAttrib.make(TransparencyAttrib.MNone), 100)
        # Now create deferred render buffers
        self._makeDeferredTargets()
        # Setup compute shader for lighting
        self._createLightingPipeline()
        # Setup combiner
        self._createCombiner()
        self.deferredTarget.setShader(BetterShader.load(
            "Shader/DefaultPostProcess.vertex", "Shader/TextureDisplay.fragment"))
        self._setupAntialiasing()
        self._createFinalPass()
        # Feed the antialiasing stage the previous frame's result and
        # positions for temporal reprojection.
        self.antialias.getFirstBuffer().setShaderInput(
            "lastFrame", self.lightingComputeCombinedTex)
        self.antialias.getFirstBuffer().setShaderInput(
            "lastPosition", self.lastPositionBuffer)
        self.antialias.getFirstBuffer().setShaderInput(
            "currentPosition", self.deferredTarget.getColorTexture())
        # The commented alternatives below select other intermediate buffers
        # for on-screen display while debugging.
        # self.deferredTarget.setShaderInput("sampler", self.lightingComputeCombinedTex)
        # self.deferredTarget.setShaderInput("sampler", self.antialias.getResultTexture())
        self.deferredTarget.setShaderInput(
            "sampler", self.finalPass.getColorTexture())
        # self.deferredTarget.setShaderInput("sampler", self.combiner.getColorTexture())
        # self.deferredTarget.setShaderInput("sampler", self.lightingComputeCombinedTex)
        # self.deferredTarget.setShaderInput("sampler", self.antialias._neighborBuffer.getColorTexture())
        # self.deferredTarget.setShaderInput("sampler", self.antialias._blendBuffer.getColorTexture())
        # self.deferredTarget.setShaderInput("sampler", self.lightingComputeCombinedTex)
        # add update task
        self._attachUpdateTask()
        # compute first mvp
        self._computeMVP()
        self.lastLastMVP = self.lastMVP
        # Debug overlays: light atlas and the combined lighting buffer.
        # DirectFrame(frameColor=(1, 1, 1, 0.2), frameSize=(-0.28, 0.28, -0.27, 0.4), pos=(base.getAspectRatio() - 0.35, 0.0, 0.49))
        self.atlasDisplayImage = OnscreenImage(image=self.lightManager.getAtlasTex(), pos=(
            base.getAspectRatio() - 0.35, 0, 0.5), scale=(0.25, 0, 0.25))
        self.lastPosImage = OnscreenImage(image=self.lightingComputeCombinedTex, pos=(
            base.getAspectRatio() - 0.35, 0, -0.05), scale=(0.25, 0, 0.25))
        # self.atlasDisplayImage = OnscreenImage(image = self.lightManager.getAtlasTex(), pos = (0,0,0), scale=(0.8,1,0.8))
        # self.atlasDisplayImage = OnscreenImage(image = self.lightPerTileStorage, pos = (base.getAspectRatio() - 0.35, 0, 0.5), scale=(0.25,0,0.25))
    def _createCombiner(self):
        """Create the temporal-reprojection combine pass.

        Blends the current frame's lighting output with the previous
        frame's combined result, using position/velocity buffers for
        reprojection (see Shader/Combiner.fragment).
        """
        self.combiner = RenderTarget("Combine-Temporal")
        self.combiner.setColorBits(8)
        self.combiner.addRenderTexture(RenderTargetType.Color)
        self.combiner.prepareOffscreenBuffer()
        # Current frame's lighting output.
        self.combiner.setShaderInput(
            "currentComputation", self.lightingComputeContainer.getColorTexture())
        # Accumulated result from the previous frame.
        self.combiner.setShaderInput(
            "lastFrame", self.lightingComputeCombinedTex)
        self.combiner.setShaderInput(
            "positionBuffer", self.deferredTarget.getColorTexture())
        self.combiner.setShaderInput(
            "velocityBuffer", self.deferredTarget.getAuxTexture(1))
        self.combiner.setShaderInput("lastPosition", self.lastPositionBuffer)
        self._setCombinerShader()
    def _setupAntialiasing(self):
        """Create the antialiasing stage, fed by the combiner's color output
        and the G-buffer depth texture."""
        self.debug("Creating antialiasing handler ..")
        self.antialias = Antialiasing()
        # self.antialias.setColorTexture(self.lightingComputeContainer.getColorTexture())
        self.antialias.setColorTexture(self.combiner.getColorTexture())
        self.antialias.setDepthTexture(self.deferredTarget.getDepthTexture())
        self.antialias.setup()
    # Creates all the render targets
    def _makeDeferredTargets(self):
        """Create the main scene G-buffer: color, depth and two aux
        render textures."""
        self.debug("Creating deferred targets")
        self.deferredTarget = RenderTarget("DeferredTarget")
        self.deferredTarget.addRenderTexture(RenderTargetType.Color)
        self.deferredTarget.addRenderTexture(RenderTargetType.Depth)
        self.deferredTarget.addRenderTexture(RenderTargetType.Aux0)
        self.deferredTarget.addRenderTexture(RenderTargetType.Aux1)
        # 16-bit color/aux, 32-bit depth for precision.
        self.deferredTarget.setAuxBits(16)
        self.deferredTarget.setColorBits(16)
        self.deferredTarget.setDepthBits(32)
        # self.deferredTarget.setSize(400, 240) # check for overdraw
        self.deferredTarget.prepareSceneRender()
    def _createFinalPass(self):
        """Create the final composition pass over the antialiased color,
        with velocity and depth inputs (see Shader/Final.fragment)."""
        self.debug("Creating final pass")
        self.finalPass = RenderTarget("FinalPass")
        self.finalPass.addRenderTexture(RenderTargetType.Color)
        self.finalPass.prepareOffscreenBuffer()
        colorTex = self.antialias.getResultTexture()
        # Set wrap for motion blur
        colorTex.setWrapU(Texture.WMMirror)
        colorTex.setWrapV(Texture.WMMirror)
        self.finalPass.setShaderInput("colorTex", colorTex)
        self.finalPass.setShaderInput("velocityTex", self.deferredTarget.getAuxTexture(1))
        self.finalPass.setShaderInput("depthTex", self.deferredTarget.getDepthTexture())
        self._setFinalPassShader()
    # Creates the storage to store the list of visible lights per tile
    def _makeLightPerTileStorage(self):
        """Allocate the integer texture that stores per-tile light lists.

        Sized at precomputeSize * 8 -- apparently an 8x8 texel region per
        tile; the factor must match the culling shader (TODO confirm).
        """
        storageSizeX = int(self.precomputeSize.x * 8)
        storageSizeY = int(self.precomputeSize.y * 8)
        self.debug(
            "Creating per tile storage of size", storageSizeX, "x", storageSizeY)
        self.lightPerTileStorage = Texture("LightsPerTile")
        self.lightPerTileStorage.setup2dTexture(
            storageSizeX, storageSizeY, Texture.TUnsignedShort, Texture.FR32i)
        # Nearest filtering: these texels are index data, not colors.
        self.lightPerTileStorage.setMinfilter(Texture.FTNearest)
        self.lightPerTileStorage.setMagfilter(Texture.FTNearest)
    # Inits the lighting pipeline
    def _createLightingPipeline(self):
        """Build the tiled deferred lighting pipeline.

        Computes the tile grid size, allocates per-tile light storage,
        creates the light-culling and lighting buffers, and wires all
        shader inputs between them and the light manager.
        """
        self.debug("Creating lighting pipeline ..")
        # size has to be a multiple of the compute unit size
        # but still has to cover the whole screen
        sizeX = int(math.ceil(self.size.x / self.patchSize.x))
        sizeY = int(math.ceil(self.size.y / self.patchSize.y))
        self.precomputeSize = Vec2(sizeX, sizeY)
        self.debug("Batch size =", sizeX, "x", sizeY,
                   "Actual Buffer size=", int(sizeX * self.patchSize.x), "x", int(sizeY * self.patchSize.y))
        self._makeLightPerTileStorage()
        # Create a buffer which computes which light affects which tile
        self._makeLightBoundsComputationBuffer(sizeX, sizeY)
        # Create a buffer which applies the lighting
        self._makeLightingComputeBuffer()
        # Register for light manager
        self.lightManager.setLightingComputator(self.lightingComputeContainer)
        self.lightManager.setLightingCuller(self.lightBoundsComputeBuff)
        self.lightingComputeContainer.setShaderInput(
            "lightsPerTile", self.lightPerTileStorage)
        self.lightingComputeContainer.setShaderInput(
            "cameraPosition", base.cam.getPos(render))
        # Ensure the images have the correct filter mode
        for bmode in [RenderTargetType.Color]:
            tex = self.lightBoundsComputeBuff.getTexture(bmode)
            tex.setMinfilter(Texture.FTNearest)
            tex.setMagfilter(Texture.FTNearest)
        self._loadFallbackCubemap()
        # Create storage for the bounds computation
        # Set inputs
        self.lightBoundsComputeBuff.setShaderInput(
            "destination", self.lightPerTileStorage)
        self.lightBoundsComputeBuff.setShaderInput(
            "depth", self.deferredTarget.getDepthTexture())
        # G-buffer attachments feeding the lighting shader.
        self.lightingComputeContainer.setShaderInput(
            "data0", self.deferredTarget.getColorTexture())
        self.lightingComputeContainer.setShaderInput(
            "data1", self.deferredTarget.getAuxTexture(0))
        self.lightingComputeContainer.setShaderInput(
            "data2", self.deferredTarget.getAuxTexture(1))
        self.lightingComputeContainer.setShaderInput(
            "shadowAtlas", self.lightManager.getAtlasTex())
        self.lightingComputeContainer.setShaderInput(
            "destination", self.lightingComputeCombinedTex)
        # self.lightingComputeContainer.setShaderInput("sampleTex", loader.loadTexture("Data/Antialiasing/Unigine01.png"))
    def _loadFallbackCubemap(self):
        """Load the default cubemap and bind it to the lighting shader as
        ``fallbackCubemap``."""
        cubemap = loader.loadCubeMap("Cubemap/#.png")
        cubemap.setMinfilter(Texture.FTLinearMipmapLinear)
        cubemap.setMagfilter(Texture.FTLinearMipmapLinear)
        cubemap.setFormat(Texture.F_srgb_alpha)
        self.lightingComputeContainer.setShaderInput(
            "fallbackCubemap", cubemap)
    def _makeLightBoundsComputationBuffer(self, w, h):
        """Create the offscreen buffer that precomputes which lights touch
        which screen tile.

        w, h -- buffer size in tiles (the tile-grid dimensions computed in
        _createLightingPipeline).
        """
        self.debug("Creating light precomputation buffer of size", w, "x", h)
        self.lightBoundsComputeBuff = RenderTarget("ComputeLightTileBounds")
        self.lightBoundsComputeBuff.setSize(w, h)
        self.lightBoundsComputeBuff.addRenderTexture(RenderTargetType.Color)
        self.lightBoundsComputeBuff.setColorBits(16)
        self.lightBoundsComputeBuff.prepareOffscreenBuffer()
        self.lightBoundsComputeBuff.setShaderInput("mainCam", base.cam)
        self.lightBoundsComputeBuff.setShaderInput("mainRender", base.render)
        self._setPositionComputationShader()
    def _makeLightingComputeBuffer(self):
        """Create the lighting pass target plus the combined-result and
        last-position textures used by temporal reprojection."""
        self.lightingComputeContainer = RenderTarget("ComputeLighting")
        # Width is divided by temporalProjFactor -- presumably only a
        # fraction of the columns is lit per frame (see temporalProjXOffs
        # cycling in _update).
        # NOTE(review): under Python 3 this is true division and yields a
        # float; confirm RenderTarget.setSize accepts that or wrap in int().
        self.lightingComputeContainer.setSize(
            base.win.getXSize() / self.temporalProjFactor, base.win.getYSize())
        self.lightingComputeContainer.addRenderTexture(RenderTargetType.Color)
        self.lightingComputeContainer.setColorBits(16)
        self.lightingComputeContainer.prepareOffscreenBuffer()
        self.lightingComputeCombinedTex = Texture("Lighting-Compute-Combined")
        self.lightingComputeCombinedTex.setup2dTexture(
            base.win.getXSize(), base.win.getYSize(), Texture.TFloat, Texture.FRgba16)
        self.lightingComputeCombinedTex.setMinfilter(Texture.FTLinear)
        self.lightingComputeCombinedTex.setMagfilter(Texture.FTLinear)
        self.lastPositionBuffer = Texture("Last-Position-Buffer")
        self.lastPositionBuffer.setup2dTexture(
            base.win.getXSize(), base.win.getYSize(), Texture.TFloat, Texture.FRgba16)
        self.lastPositionBuffer.setMinfilter(Texture.FTNearest)
        self.lastPositionBuffer.setMagfilter(Texture.FTNearest)
def _setLightingShader(self):
lightShader = BetterShader.load(
"Shader/DefaultPostProcess.vertex", "Shader/ApplyLighting.fragment")
self.lightingComputeContainer.setShader(lightShader)
def _setCombinerShader(self):
cShader = BetterShader.load(
"Shader/DefaultPostProcess.vertex", "Shader/Combiner.fragment")
self.combiner.setShader(cShader)
def _setPositionComputationShader(self):
pcShader = BetterShader.load(
"Shader/DefaultPostProcess.vertex", "Shader/PrecomputeLights.fragment")
self.lightBoundsComputeBuff.setShader(pcShader)
def _setFinalPassShader(self):
fShader = BetterShader.load(
"Shader/DefaultPostProcess.vertex", "Shader/Final.fragment")
self.finalPass.setShader(fShader)
def _getSize(self):
return Vec2(
int(self.showbase.win.getXSize()),
int(self.showbase.win.getYSize()))
    def debugReloadShader(self):
        """Reload every shader in the pipeline in-place (debug helper)."""
        self.lightManager.debugReloadShader()
        self._setPositionComputationShader()
        self._setCombinerShader()
        self._setLightingShader()
        self._setFinalPassShader()
        self.antialias.reloadShader()
    def _attachUpdateTask(self):
        """Register the per-frame update task; the very low sort value is
        presumably so it runs before other tasks -- confirm against the
        task manager's ordering."""
        self.showbase.addTask(
            self._update, "UpdateRenderingPipeline", sort=-10000)
    def _computeCameraBounds(self):
        """Return the camera lens bounds transformed into render space;
        used as the light-culling volume in _update."""
        # compute camera bounds in render space
        cameraBounds = self.camera.node().getLens().makeBounds()
        cameraBounds.xform(self.camera.getMat(render))
        return cameraBounds
    def _update(self, task=None):
        """Per-frame task: advance the temporal column offset, update light
        culling, and refresh every shader input that changes per frame."""
        # Cycle the temporal reprojection column offset.
        self.temporalProjXOffs += 1
        self.temporalProjXOffs = self.temporalProjXOffs % self.temporalProjFactor
        self.cullBounds = self._computeCameraBounds()
        self.lightManager.setCullBounds(self.cullBounds)
        self.lightManager.update()
        self.lightingComputeContainer.setShaderInput(
            "cameraPosition", base.cam.getPos(render))
        self.lightingComputeContainer.setShaderInput(
            "temporalProjXOffs", LVecBase2i(self.temporalProjXOffs))
        # At this point lastMVP still holds the PREVIOUS frame's matrix...
        self.combiner.setShaderInput("lastMVP", self.lastMVP)
        render.setShaderInput("lastMVP", self.lastMVP)
        self.combiner.setShaderInput(
            "temporalProjXOffs", LVecBase2i(self.temporalProjXOffs))
        # ...and becomes the current frame's matrix after this call.
        self._computeMVP()
        self.combiner.setShaderInput("currentMVP", self.lastMVP)
        self.combiner.setShaderInput("cameraPosition", base.cam.getPos(render))
        if task is not None:
            return task.cont
    def _computeMVP(self):
        """Recompute the camera's model-view-projection matrix and store it
        in ``self.lastMVP`` (consumed by the combiner for reprojection)."""
        projMat = Mat4.convertMat(
            CSYupRight,
            base.camLens.getCoordinateSystem()) * base.camLens.getProjectionMat()
        transformMat = TransformState.makeMat(
            Mat4.convertMat(base.win.getGsg().getInternalCoordinateSystem(),
                            CSZupRight))
        modelViewMat = transformMat.invertCompose(
            render.getTransform(base.cam)).getMat()
        self.lastMVP = modelViewMat * projMat
        # print "Self.lastMVP is now from frame",globalClock.getFrameTime()
    def getLightManager(self):
        """Return the pipeline's light manager instance."""
        return self.lightManager
def getDefaultObjectShader(self):
shader = BetterShader.load(
"Shader/DefaultObjectShader.vertex", "Shader/DefaultObjectShader.fragment")
return shader
|
14,371 | 5dd863e45f35315d1b2f345c808e38bd3be0db41 | from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
class UserProfile(models.Model):
    """Twitter-sourced profile data attached to a Django ``User``."""

    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument
    # (e.g. on_delete=models.CASCADE); left unchanged for compatibility
    # with the Django version this project targets -- confirm.
    user = models.ForeignKey(User)
    access_token = models.CharField(max_length=255, blank=True, null=True, editable=False)
    profile_image_url = models.URLField(blank=True, null=True)
    location = models.CharField(max_length=100, blank=True, null=True)
    url = models.URLField(blank=True, null=True)
    description = models.CharField(max_length=160, blank=True, null=True)

    @staticmethod
    def from_twitter(userinfo):
        """Create or update a User + UserProfile pair from a Twitter user
        object and return the saved profile."""
        screen_name = userinfo.screen_name
        user, created = User.objects.get_or_create(username=screen_name)
        if created:
            # Random password so the account cannot log in through
            # Django's built-in password authentication.
            temp_password = User.objects.make_random_password(length=12)
            user.set_password(temp_password)
            user.first_name = userinfo.name
            user.backend = "twitter"
            user.save()
        # Get (or build) the user profile and copy the Twitter fields over.
        userprofile = UserProfile.get_or_create(user)
        userprofile.user = user
        # NOTE(review): literal placeholder -- the real OAuth token is
        # never stored here; confirm against the auth flow.
        userprofile.access_token = "token"
        userprofile.url = userinfo.url
        userprofile.location = userinfo.location
        userprofile.description = userinfo.description
        userprofile.profile_image_url = userinfo.profile_image_url
        userprofile.save()
        return userprofile

    @staticmethod
    def get_user_profile_from_user_name(username):
        """Return the UserProfile for ``username``, or None when absent."""
        try:
            u = User.objects.get(username=username)
            return UserProfile.objects.get(user=u)
        except (User.DoesNotExist, UserProfile.DoesNotExist):
            # The original bare ``except`` swallowed every error (including
            # database/programming errors); only "not found" is expected.
            return None

    @staticmethod
    def get_user_profile_from_user_id(id):
        """Return the UserProfile for the given User primary key, or None."""
        try:
            return UserProfile.objects.get(user=User.objects.get(id=id))
        except (User.DoesNotExist, UserProfile.DoesNotExist):
            return None

    @staticmethod
    def get_or_create(user):
        """Return the existing profile for ``user`` or a fresh unsaved one."""
        try:
            return UserProfile.objects.get(user=user)
        except UserProfile.DoesNotExist:
            return UserProfile()

    def __str__(self):
        return "%s's profile" % self.user
14,372 | 0dfda0975201ed87d72b27cde9ac43f8d5a5afd5 | _author_ = "Jaiden Woods"
|
14,373 | 29a7453dab7606dab60cef1f9b5fb3d7a22cf219 | """ Entry point for the Application """
import os
import sys
import click
from flask_migrate import Migrate
from app import create_app
from api.models import db, Role, User
app = create_app(environment=os.environ.get('APP_SETTINGS', 'Development'))
migrate = Migrate(app, db)
@app.shell_context_processor
def make_shell_context():
    """Names automatically available inside ``flask shell`` sessions."""
    return {'app': app, 'db': db}
@app.cli.command()
@click.option(
    '--coverage/--no-coverage',
    default=False,
    help='Enable code coverage')
def test(coverage):
    """ Run the unit tests """
    # TODO(review): stub -- the ``coverage`` flag is ignored and no test
    # runner is invoked; it only prints 'success'.
    print('success')
    pass
@app.cli.command()
def create_database():
    """ Create database tables from sqlalchemy models """
    try:
        db.create_all()
    except Exception:
        print('Failed to create db. Make sure your database server is running')
    else:
        print('Created tables successfully!')
@app.cli.command()
def drop_database():
    """ Drop database tables """
    # Guard clause: bail out unless the user explicitly confirms.
    if not click.confirm("Are you sure you want to lose all your data?", default=False):
        return
    try:
        db.drop_all()
    except Exception:
        print("Failed, make sure your db server is running")
    else:
        print("Dropped all tables successfully")
@app.cli.command()
def seed():
    """ Seed database tables with initial data """
    # TODO(review): not implemented yet.
    pass
|
class Nint(int):
    """int subclass demonstrating operator-hook abuse: reflected addition
    (``plain_int + Nint``) actually computes ``self - other``."""

    def __radd__(self, other):
        # Only reached for ``int + Nint``: Python prefers the subclass's
        # reflected hook. ``Nint + Nint`` still uses int.__add__.
        return int.__sub__(self, other)
a = Nint(5)
b = Nint(3)
print a + b
# 8
print 1 + b
#2
|
14,375 | f53fb1ad2c6dd487467c053d37706fb7e352120a | import pickle
import numpy as np
import os
from munch import munchify
from numpynet.utils import onehot
def read_data_sets(path='./dataset/cifar-10', one_hot=True):
    """Load CIFAR-10 into train/validation/test splits.

    Returns a Munch with ``train``/``validation``/``test`` entries, each
    holding ``images`` and one-hot ``labels``. (``one_hot`` is currently
    unused, matching the historical signature.)
    """
    batches = [load_data_batch(path, k) for k in range(1, 6)]
    X_train = np.concatenate([images for images, _ in batches], axis=0)
    y_train = onehot(np.concatenate([labels for _, labels in batches], axis=0), 10)

    # Hold out the first 5000 training examples for validation.
    X_valid, y_valid = X_train[:5000], y_train[:5000]
    X_train, y_train = X_train[5000:], y_train[5000:]

    X_test, y_test = load_test_batch(path)
    y_test = onehot(y_test, 10)

    return munchify({
        'train': {'images': X_train, 'labels': y_train},
        'validation': {'images': X_valid, 'labels': y_valid},
        'test': {'images': X_test, 'labels': y_test},
    })
def label_names(path='./dataset/cifar-10'):
    """Return the unpickled CIFAR-10 metadata (``batches.meta``).

    Fix: the original passed ``open(...)`` straight to ``pickle.load``,
    leaking the file handle; a context manager closes it deterministically.
    """
    with open(os.path.join(path, 'batches.meta'), 'rb') as fo:
        return pickle.load(fo)
def load_data_batch(path, k):
    """Load training batch ``k`` (1-5) from ``path``; returns (X, y)."""
    return _load_batch(os.path.join(path, 'data_batch_%d' % k))
def load_test_batch(path):
    """Load the held-out test batch from ``path``; returns (X, y)."""
    return _load_batch(os.path.join(path, 'test_batch'))
def _load_batch(path):
with open(path, 'rb') as fo:
batch = pickle.load(fo, encoding='bytes')
X = batch[b'data'] / 255.
y = np.array(batch[b'labels'])
return X, y
|
14,376 | bab33b900ec276268ea42b56787390fd89378f0f | """
Single Bubble Model: Bubble simulations
========================================
Use the ``TAMOC`` `single_bubble_model` to simulate the trajectory of a
natural gas bubble rising through the water column. This script demonstrates
the typical steps involved in running the single bubble model.
It uses the ambient data stored in the file `../test/output/test_bm54.nc`,
created by the `test_ambient` module. Please make sure all tests have
passed before running this script or modify the script to use a different
source of ambient data.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import seawater
from tamoc import single_bubble_model
import numpy as np
if __name__ == '__main__':

    # Open an ambient profile object from the netCDF dataset
    nc = '../../test/output/test_bm54.nc'
    bm54 = ambient.Profile(nc, chem_names='all')
    bm54.close_nc()

    # Initialize a single_bubble_model.Model object with this data
    sbm = single_bubble_model.Model(bm54)

    # Create a natural gas particle to track
    composition = ['methane', 'ethane', 'propane', 'oxygen']
    gas = dbm.FluidParticle(composition, fp_type=0.)

    # Set the mole fractions of each component at release. Note that oxygen
    # is listed so that stripping from the water column can be simulated, but
    # that the initial mole fraction of oxygen is zero. This is the normal
    # behavior: any component not listed in the composition, even if it is
    # present in the ambient CTD data, will not be simulated. The
    # `composition` variable is the only means to tell the single bubble
    # model what chemicals to track.
    mol_frac = np.array([0.90, 0.07, 0.03, 0.0])

    # Specify the remaining particle initial conditions
    # (presumably diameter [m], depth [m] and temperature [K] -- confirm
    # against the single_bubble_model.Model.simulate signature)
    de = 0.005
    z0 = 1000.
    T0 = 273.15 + 30.

    # Simulate the trajectory through the water column and plot the results
    sbm.simulate(gas, z0, de, mol_frac, T0, K=0.5, K_T=1, fdis=1e-8,
                 delta_t=1.)
    sbm.post_process()

    # Save the simulation to a netCDF file
    sbm.save_sim('./bubble.nc', '../../test/output/test_bm54.nc',
                 'Results of ./bubbles.py script')

    # Save the data for importing into Matlab
    sbm.save_txt('./bubble', '../../test/output/test_bm54.nc',
                 'Results of ./bubbles.py script')
|
14,377 | dcbf481a31f5ace0bab95895d014a520c8e15cc3 | #!/usr/bin/python3
import ledgerhelpers.legacy
from ledgerhelpers import diffing
CHAR_ENTER = "\n"
CHAR_COMMENT = ";#"
CHAR_NUMBER = "1234567890"
CHAR_TAB = "\t"
CHAR_WHITESPACE = " \t"
CHAR_CLEARED = "*"
CHAR_PENDING = "!"

STATE_CLEARED = CHAR_CLEARED
STATE_PENDING = CHAR_PENDING
STATE_UNCLEARED = None


def pos_within_items_to_row_and_col(pos, items):
    """Map a flat offset into *items* to 1-based (row, col) coordinates."""
    row, col = 1, 1
    for index, item in enumerate(items):
        if index >= pos:
            break
        if item in CHAR_ENTER:
            row, col = row + 1, 1
        else:
            col += 1
    return row, col
def parse_date_from_transaction_contents(contents):
    """Join the date characters and parse them via ledgerhelpers.legacy."""
    return ledgerhelpers.legacy.parse_date("".join(contents))
class Token(object):
    """A lexed span of input: its position plus the text it covers.

    ``contents`` may be given as a string or as a sequence of characters,
    which is joined into a string.
    """

    def __init__(self, pos, contents):
        self.pos = pos
        self.contents = contents if isinstance(contents, str) else "".join(contents)

    def __str__(self):
        return """<%s at pos %d len %d
%s>""" % (self.__class__.__name__, self.pos, len(self.contents), self.contents)
# Marker token classes: each tags a lexed span with its syntactic role
# and adds no behavior beyond the Token base class.
class TokenComment(Token):
    pass

class TokenTransactionComment(Token):
    pass

class TokenTransactionClearedFlag(Token):
    pass

class TokenTransactionPendingFlag(Token):
    pass

class TokenWhitespace(Token):
    pass
class TokenTransaction(Token):
    """A lexed transaction block.

    Re-lexes its own text with LedgerTransactionLexer and exposes the
    parsed fields: ``date``, ``secondary_date``, ``state``,
    ``clearing_date``, ``payee`` and ``postings``.
    """

    def __init__(self, pos, contents):
        Token.__init__(self, pos, contents)
        lexer = LedgerTransactionLexer(contents)
        lexer.run()

        def find_token(klass):
            # First sub-token of the given type, or None.
            try:
                return [t for t in lexer.tokens if isinstance(t, klass)][0]
            except IndexError:
                return None
        try:
            self.date = find_token(TokenTransactionDate).date
        except AttributeError:
            raise TransactionLexingError("no transaction date in transaction")
        try:
            self.secondary_date = find_token(
                TokenTransactionSecondaryDate
            ).date
        except AttributeError:
            self.secondary_date = None
        # Cleared/pending flag determines the transaction state.
        if find_token(TokenTransactionClearedFlag):
            self.state = STATE_CLEARED
        elif find_token(TokenTransactionPendingFlag):
            self.state = STATE_PENDING
        else:
            self.state = STATE_UNCLEARED
        # For cleared/pending entries the secondary date (if any) is the
        # clearing date, otherwise the primary date is reused.
        if self.state != STATE_UNCLEARED:
            self.clearing_date = (
                self.secondary_date if self.secondary_date else self.date
            )
        else:
            self.clearing_date = None
        try:
            self.payee = find_token(TokenTransactionPayee).payee
        except AttributeError:
            raise TransactionLexingError("no payee in transaction")
        # Pair the strictly alternating account/amount tokens into postings.
        accountsamounts = [
            t for t in lexer.tokens
            if isinstance(t, TokenTransactionPostingAccount) or
            isinstance(t, TokenTransactionPostingAmount)
        ]
        x = []
        last = None
        for v in accountsamounts:
            if isinstance(v, TokenTransactionPostingAccount):
                assert type(last) in [
                    type(None), TokenTransactionPostingAmount
                ], lexer.tokens
            elif isinstance(v, TokenTransactionPostingAmount):
                assert type(last) in [
                    TokenTransactionPostingAccount
                ], lexer.tokens
                x.append(
                    ledgerhelpers.TransactionPosting(
                        last.account, v.amount
                    )
                )
            last = v
        assert len(x) * 2 == len(accountsamounts), lexer.tokens
        self.postings = x
class TokenTransactionWithContext(TokenTransaction):
    """A transaction token bundled with its surrounding comment tokens.

    Wraps the inner TokenTransaction; ``contents`` spans the comments plus
    the transaction text so re-serialization stays lossless.
    """

    def __init__(self, pos, tokens):
        self.transaction = [
            t for t in tokens if isinstance(t, TokenTransaction)
        ][0]
        self.pos = pos
        self.contents = "".join(t.contents for t in tokens)

    @property
    def date(self):
        # Delegate to the wrapped transaction.
        return self.transaction.date
class TokenConversion(Token):
    """Top-level ``C`` commodity-conversion directive."""
    pass

class TokenPrice(Token):
    """Top-level ``P`` price directive."""
    pass

class TokenEmbeddedPython(Token):
    """Top-level ``python`` embedded-code block."""
    pass
class TokenTransactionPostingAccount(Token):
    """The account-name part of a posting line."""

    def __init__(self, pos, contents):
        super(TokenTransactionPostingAccount, self).__init__(pos, contents)
        self.account = "".join(contents)
class TokenTransactionPostingAmount(Token):
    """The amount part of a posting line (may be empty text)."""

    def __init__(self, pos, contents):
        super(TokenTransactionPostingAmount, self).__init__(pos, contents)
        self.amount = "".join(contents)
class TokenEmbeddedTag(Token):
    """Top-level ``tag`` directive."""
    pass
class TokenTransactionDate(Token):
    """Primary transaction date; ``date`` holds the parsed value."""

    def __init__(self, pos, contents):
        Token.__init__(self, pos, contents)
        self.date = parse_date_from_transaction_contents(self.contents)
class TokenTransactionSecondaryDate(Token):
    """Secondary (``=``) date of a transaction; ``date`` holds the parsed
    value -- used as the clearing date by TokenTransaction."""

    def __init__(self, pos, contents):
        Token.__init__(self, pos, contents)
        self.date = parse_date_from_transaction_contents(self.contents)
class TokenTransactionPayee(Token):
    """The payee text of a transaction's first line."""

    def __init__(self, pos, contents):
        super(TokenTransactionPayee, self).__init__(pos, contents)
        self.payee = "".join(contents)
class LexingError(Exception):
    """Raised when ledger text cannot be lexed into tokens."""
    pass

class TransactionLexingError(Exception):
    """Raised when a single transaction block is malformed."""
    pass

class EOF(LexingError):
    """Internal signal: the lexer ran past the end of its input."""
    pass
class GenericLexer(object):
    """Base lexer: a cursor over a tuple of items (characters or tokens)
    with peek/next/emit primitives shared by the concrete lexers.

    Emitted Token instances accumulate in ``self.tokens``.
    """

    def __init__(self, items):
        # BUG FIX: the original guard read ``isinstance(items, str) and not
        # isinstance(items, str)`` -- always False (a py2->py3 porting
        # leftover), so bytes input was never decoded and split into ints.
        if isinstance(items, bytes):
            self.items = tuple(items.decode("utf-8"))
        else:
            self.items = tuple(items)
        self.start = 0
        self.pos = 0
        self._last_emitted_pos = self.pos
        self.tokens = []

    def __next__(self):
        """Return the item at the current position and advance; raise EOF
        past the end."""
        try:
            t = self.items[self.pos]
        except IndexError:
            raise EOF()
        self.pos += 1
        return t

    def peek(self):
        """Return the item at the current position without advancing;
        raise EOF past the end."""
        try:
            return self.items[self.pos]
        except IndexError:
            raise EOF()

    def confirm_next(self, seq):
        """True if every item of ``seq`` matches the upcoming items."""
        for n, i in enumerate(seq):
            try:
                if self.items[self.pos + n] != i:
                    return False
            except IndexError:
                return False
        return True

    def emit(self, klass, items):
        """Instantiate ``klass(pos, items)`` and append it to the token
        accumulator, remembering the emit position for diagnostics."""
        token = klass(self.pos, items)
        self._last_emitted_pos = self.pos
        self.tokens += [token]

    def more(self):
        """True while unconsumed items remain."""
        return self.pos < len(self.items)
class LedgerTextLexer(GenericLexer):
    """Character-level lexer: splits raw ledger text into coarse tokens
    (whitespace, comments, transactions, directives).

    Each ``state_*`` method consumes characters and returns the next state
    function; ``run`` drives the machine until a state returns None.
    """

    def __init__(self, text):
        assert isinstance(text, str), type(text)
        GenericLexer.__init__(self, text)

    def state_parsing_toplevel_text(self):
        """Returns another state function."""
        chars = []
        while self.more():
            if self.peek() in CHAR_COMMENT:
                self.emit(TokenWhitespace, chars)
                return self.state_parsing_comment
            # A digit at top level starts a transaction date.
            if self.peek() in CHAR_NUMBER:
                self.emit(TokenWhitespace, chars)
                return self.state_parsing_transaction
            if self.confirm_next("P"):
                self.emit(TokenWhitespace, chars)
                return self.state_parsing_price
            if self.confirm_next("C"):
                self.emit(TokenWhitespace, chars)
                return self.state_parsing_conversion
            if self.confirm_next("python"):
                self.emit(TokenWhitespace, chars)
                return self.state_parsing_embedded_python
            if self.confirm_next("tag"):
                self.emit(TokenWhitespace, chars)
                return self.state_parsing_embedded_tag
            if self.peek() not in CHAR_WHITESPACE + CHAR_ENTER:
                _, _, l2, c2 = self._coords()
                raise LexingError(
                    "unparsable data at line %d, char %d" % (l2, c2)
                )
            chars += [next(self)]
        self.emit(TokenWhitespace, chars)
        return

    def state_parsing_comment(self):
        # Consume the comment line plus any directly following comment lines.
        chars = [next(self)]
        while self.more():
            if chars[-1] in CHAR_ENTER and self.peek() not in CHAR_COMMENT:
                break
            chars.append(next(self))
        self.emit(TokenComment, chars)
        return self.state_parsing_toplevel_text

    def state_parsing_price(self):
        return self.state_parsing_embedded_directive(TokenPrice, False)

    def state_parsing_conversion(self):
        return self.state_parsing_embedded_directive(TokenConversion, False)

    def state_parsing_embedded_tag(self):
        return self.state_parsing_embedded_directive(TokenEmbeddedTag)

    def state_parsing_embedded_python(self):
        return self.state_parsing_embedded_directive(TokenEmbeddedPython)

    def state_parsing_embedded_directive(self, klass, maybe_multiline=True):
        """Consume a directive; multi-line directives continue over
        indented or blank lines."""
        chars = [next(self)]
        while self.more():
            if chars[-1] in CHAR_ENTER:
                if not maybe_multiline:
                    break
                if self.peek() in CHAR_WHITESPACE + CHAR_ENTER:
                    chars.append(next(self))
                    continue
                if self.peek() in CHAR_COMMENT:
                    self.emit(klass, chars)
                    return self.state_parsing_comment
                if self.peek() in CHAR_NUMBER:
                    self.emit(klass, chars)
                    return self.state_parsing_transaction
                self.emit(klass, chars)
                return self.state_parsing_toplevel_text
            chars.append(next(self))
        self.emit(klass, chars)
        return self.state_parsing_toplevel_text

    def state_parsing_transaction(self):
        # A transaction runs until the first non-indented line.
        chars = [next(self)]
        while self.more():
            if chars[-1] in CHAR_ENTER and self.peek() not in CHAR_WHITESPACE:
                break
            chars.append(next(self))
        self.emit(TokenTransaction, chars)
        return self.state_parsing_toplevel_text

    def _coords(self):
        """(row, col) of the last emit and of the current position."""
        r, c = pos_within_items_to_row_and_col(self._last_emitted_pos, self.items)
        r2, c2 = pos_within_items_to_row_and_col(self.pos, self.items)
        return r, c, r2, c2

    def run(self):
        """Drive the state machine; wrap unexpected errors in a LexingError
        carrying text coordinates."""
        state = self.state_parsing_toplevel_text
        while state:
            try:
                state = state()
            except LexingError:
                raise
            except Exception as e:
                l, c, l2, c2 = self._coords()
                raise LexingError(
                    "bad ledger data between line %d, char %d and line %d, char %d: %s" % (
                        l, c, l2, c2, e
                    )
                )
class LedgerTransactionLexer(GenericLexer):
    """Lexer for one transaction block: date(s), cleared/pending flag,
    payee and the indented posting lines."""

    def __init__(self, text):
        GenericLexer.__init__(self, text)

    def state_parsing_transaction_date(self):
        chars = []
        while self.more():
            if self.peek() not in "0123456789-/":
                self.emit(TokenTransactionDate, chars)
                if self.confirm_next("="):
                    # ``date=date`` form: the second date is the clearing date.
                    next(self)
                    return self.state_parsing_clearing_date
                elif self.peek() in CHAR_WHITESPACE:
                    return self.state_parsing_cleared_flag_or_payee
                else:
                    raise TransactionLexingError("invalid character %s" % self.peek())
            chars += [next(self)]
        raise TransactionLexingError("incomplete transaction")

    def state_parsing_clearing_date(self):
        chars = []
        while self.more():
            if self.peek() not in "0123456789-/":
                next(self)
                self.emit(TokenTransactionSecondaryDate, chars)
                return self.state_parsing_cleared_flag_or_payee
            chars += [next(self)]
        raise TransactionLexingError("incomplete transaction")

    def state_parsing_cleared_flag_or_payee(self):
        # Optional ``*`` (cleared) or ``!`` (pending) before the payee.
        while self.more():
            if self.peek() in CHAR_WHITESPACE:
                next(self)
                continue
            if self.peek() in CHAR_ENTER:
                break
            if self.confirm_next(CHAR_CLEARED):
                self.emit(TokenTransactionClearedFlag, [next(self)])
                return self.state_parsing_payee
            if self.confirm_next(CHAR_PENDING):
                self.emit(TokenTransactionPendingFlag, [next(self)])
                return self.state_parsing_payee
            return self.state_parsing_payee
        raise TransactionLexingError("incomplete transaction")

    def state_parsing_payee(self):
        return self.state_parsing_rest_of_line(
            TokenTransactionPayee,
            self.state_parsing_transaction_posting_indentation)

    def state_parsing_rest_of_line(
        self,
        klass, next_state,
        allow_empty_values=False
    ):
        """Consume to end of line (trimming surrounding whitespace) and emit
        the collected text as *klass*; then hand over to *next_state*."""
        chars = []
        while self.more():
            if self.peek() in CHAR_ENTER:
                next(self)
                while chars and chars[-1] in CHAR_WHITESPACE:
                    chars = chars[:-1]
                break
            if self.peek() in CHAR_WHITESPACE and not chars:
                next(self)
                continue
            chars.append(next(self))
        if allow_empty_values or chars:
            self.emit(klass, chars)
            return next_state
        raise TransactionLexingError("incomplete transaction")

    def state_parsing_transaction_posting_indentation(self):
        chars = []
        while self.more():
            if self.peek() not in CHAR_WHITESPACE:
                break
            chars.append(next(self))
        if not chars:
            # No indentation: the transaction body has ended.
            return
        if self.more() and self.peek() in CHAR_ENTER:
            # Whitespace-only line inside the transaction; skip it.
            next(self)
            return self.state_parsing_transaction_posting_indentation
        return self.state_parsing_transaction_posting_account

    def state_parsing_transaction_comment(self):
        return self.state_parsing_rest_of_line(
            TokenTransactionComment,
            self.state_parsing_transaction_posting_indentation)

    def state_parsing_transaction_posting_account(self):
        chars = []
        if self.peek() in CHAR_COMMENT:
            return self.state_parsing_transaction_comment
        while self.more():
            # Two consecutive spaces, a tab, or end of line terminate the
            # account name (single spaces may appear inside it).
            if (
                (self.peek() in CHAR_WHITESPACE and
                 chars and chars[-1] in CHAR_WHITESPACE) or
                self.peek() in CHAR_TAB or
                self.peek() in CHAR_ENTER
            ):
                while chars[-1] in CHAR_WHITESPACE:
                    chars = chars[:-1]
                break
            chars.append(next(self))
        if not chars:
            raise TransactionLexingError("truncated transaction posting")
        self.emit(TokenTransactionPostingAccount, chars)
        return self.state_parsing_transaction_posting_amount

    def state_parsing_transaction_posting_amount(self):
        # An amount may legitimately be empty (elided amount).
        return self.state_parsing_rest_of_line(
            TokenTransactionPostingAmount,
            self.state_parsing_transaction_posting_indentation,
            allow_empty_values=True)

    def run(self):
        """Drive the state machine until a state returns None."""
        state = self.state_parsing_transaction_date
        while state:
            state = state()
class LedgerContextualLexer(GenericLexer):
    """Second pass over Token objects: glues a comment, the transaction it
    precedes, and any trailing comments into one
    TokenTransactionWithContext; other tokens pass through unchanged."""

    def state_parsing_toplevel(self):
        while self.more():
            if isinstance(self.peek(), TokenComment):
                return self.state_parsing_comment
            token = next(self)
            self.emit(token.__class__, token.contents)

    def state_parsing_comment(self):
        token = next(self)
        if (
            self.more() and
            isinstance(token, TokenComment) and
            isinstance(self.peek(), TokenTransaction)
        ):
            # comment + transaction [+ trailing comments] -> one token.
            transaction_token = next(self)
            additional_comments = []
            while self.more() and isinstance(self.peek(), TokenComment):
                additional_comments.append(next(self))
            self.emit(TokenTransactionWithContext,
                      [token, transaction_token] + additional_comments)
        else:
            self.emit(token.__class__, token.contents)
        return self.state_parsing_toplevel

    def run(self):
        """Drive the state machine; wrap unexpected errors with the chunk
        indices involved."""
        state = self.state_parsing_toplevel
        while state:
            try:
                state = state()
            except LexingError:
                raise
            except Exception as e:
                raise LexingError(
                    "error parsing ledger data between chunk %d and chunk %d): %s" % (
                        self._last_emitted_pos, self.pos, e
                    )
                )
def lex_ledger_file_contents(text, debug=False):
    """Lex ledger file text into tokens, verifying losslessness.

    Runs the character-level lexer, then the contextual lexer; after each
    pass the concatenated token contents must reproduce ``text`` exactly,
    otherwise a LexingError is raised (with a visual diff when ``debug``).
    """
    lexer = LedgerTextLexer(text)
    lexer.run()
    concat_lexed = "".join([x.contents for x in lexer.tokens])
    if concat_lexed != text:
        if debug:
            u = "Debugging error lexing text: files differ\n\n"
            diffing.two_way_diff(u + text, u + concat_lexed)
        raise LexingError("the lexed contents and the original contents are not the same")
    # Second pass operates on the token stream, not characters.
    lexer = LedgerContextualLexer(lexer.tokens)
    lexer.run()
    concat_lexed = "".join([ x.contents for x in lexer.tokens ])
    if concat_lexed != text:
        if debug:
            u = "Debugging error lexing chunks: files differ\n\n"
            diffing.two_way_diff(u + text, u + concat_lexed)
        raise LexingError("the lexed chunks and the original chunks are not the same")
    return lexer.tokens
|
14,378 | 2cccc226675e1298212396af07a7926ada6c8f56 | from flask import Flask, request, Response, jsonify
app = Flask(__name__)
# ``global`` is a no-op at module scope; a plain assignment defines the
# module-level in-memory store used by the route handlers below.
abc = {}
@app.route('/', methods=['GET'])
def home():
    """Landing / health-check route."""
    return "Hello World!"
@app.route('/users', methods=['POST'])
def post():
    """Create (or overwrite) the single stored user record.

    Reads ``name`` from the form data; the id is hard-coded to 1, so this
    endpoint only ever stores one user and repeated POSTs overwrite it.
    Returns the record with HTTP 201.
    """
    abc['id'] = 1
    abc['name'] = request.form["name"]
    return jsonify(abc), 201
@app.route('/users/<id1>', methods=['GET'])
def get(id1):
    """Return the stored user record for ``id1``.

    Fix: the original fell through and returned None on a miss, which
    makes Flask raise a TypeError; respond 404 instead. ``dict.get``
    also avoids a KeyError after the record was deleted.
    """
    if abc.get('id') == int(id1):
        return jsonify(abc), 200
    return '', 404
@app.route('/users/<id1>', methods=['DELETE'])
def delete(id1):
    """Delete the stored user record for ``id1``.

    Fix: on an id mismatch the original returned the untouched record with
    HTTP 200; respond 404 instead. ``dict.get`` avoids a KeyError when the
    store is already empty.
    """
    if abc.get('id') == int(id1):
        del abc['id']
        del abc['name']
        return '', 204
    return '', 404
14,379 | b37e79522207b4d4946ae9c6aa5af942f11ea896 | from message_sender import MessageSender
class EmailMessageSender(MessageSender):
    """Email-flavoured MessageSender.

    Currently a stub: it only prints a trace line; no email is sent.
    """

    def send_message(self, message):
        # NOTE(review): ``message`` is unused until real delivery is added.
        print("EmailMessageSender: Sending email message...")
14,380 | ed27d1e544cf693b2775424c1cc6a5c274a05203 | def pattern(n):
# for loop for rows
for i in range(1,n+1):
# conditional operator
if(i % 2 != 0):
k =i + 1
else:
i
# for loop for printing spaces
for g in range(k,n):
if g>=k:
print(end=" ")
# according to value of k carry
# out further operation
for j in range(0,k):
if j == k - 1:
print(" * ")
else:
print(" * ", end = " ")
# Driver code
n = 10
pattern(n)
#===========================================================
# Interactive demo: collect three name -> date-of-birth pairs, then look
# one up. NOTE(review): the variable shadows the builtin ``dict``; rename
# if this grows beyond a throwaway script. A lookup of an unknown name
# raises KeyError.
dict = {}
i=0
while i<3:
    i = i+1
    name = input("Enter name")
    dob = input("Enter dob")
    dict[name]=dob
print(dict)
name_key = input("Enter name to find the birthday:")
print("Hi ",name_key," Your Bith Date is ",dict[name_key])
14,381 | 51ea1890072846ce6f31fc2e0d689fe9d84c517d | # NOTE: Keeping this commented out fixes our memory issue!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# import time
# print("HELLO WORLD %sms" % (time.monotonic() / 1000)) # memory issue might be due to using %sms instead of {} and format FYI.
print("HELLO WORLD")
|
14,382 | fcc4d1df2d9c976a575560f5980657f3e2a3ab4a | """
Source Code:
https://pytorch.org/tutorials/beginner/nlp/advanced_tutorial.html
"""
import torch
import torch.nn as nn
from torch import Tensor
NOT_POSSIBLE_TRANSITION = -1e8
def log_sum_exp(x: Tensor):
    """Numerically stable log-sum-exp over the last dimension of ``x``.

    Subtracts the per-row maximum before exponentiating to avoid overflow.
    """
    peak = x.max(dim=-1).values
    return peak + (x - peak.unsqueeze(-1)).exp().sum(-1).log()
class CRF(nn.Module):
    """
    CRF class to model the transitions between tags.

    Two internal tags are appended after the user tags: START (= num_tags)
    and STOP (= num_tags + 1).  transition_matrix[i, j] is used as the score
    of transitioning *from* tag j *to* tag i (see the indexing in _score and
    viterbi).
    """
    def __init__(self, in_features: int, num_tags: int, device: str="cpu"):
        super(CRF, self).__init__()
        self.num_tags = num_tags + 2          # user tags + START + STOP
        self.start_idx = self.num_tags - 2
        self.stop_idx = self.num_tags - 1
        # Projects encoder features to per-tag emission scores.
        self.linear = nn.Linear(in_features, self.num_tags)
        self.device = device
        # Transition matrix to model the transition probabilities between tags (states)
        self.transition_matrix = nn.Parameter(torch.randn(self.num_tags, self.num_tags),
                                              requires_grad=True)
        # Transitioning from any tag to start tag is not possible.
        self.transition_matrix.data[self.start_idx, :] = NOT_POSSIBLE_TRANSITION
        # Transitioning from stop tag to any other tag is not possible.
        self.transition_matrix.data[:, self.stop_idx] = NOT_POSSIBLE_TRANSITION

    def forward(self, features: Tensor, masks: Tensor) -> Tensor:
        """Compute emission scores, then decode the best tag path (Viterbi)."""
        features = self.linear(features)
        return self.viterbi(features, masks[:, :features.size(1)].float())

    def loss(self, features: Tensor, tags: Tensor, masks: Tensor):
        """
        Computing the negative log-likelihood loss.

        NLL = log-partition over all paths (forward algorithm) minus the
        score of the gold path, averaged over the batch.
        """
        features = self.linear(features)
        T = features.size(1)
        masks_ = masks[:, :T].float()
        forward_score = self.forward_algorithm(features, masks_)
        gold_score = self._score(features, tags[:, :T].long(), masks_)
        loss = (forward_score - gold_score).mean()
        return loss

    def _score(self, features: Tensor, tags: Tensor, masks: Tensor):
        """
        Scoring the sentence for given tags.
        """
        B, T, H = features.shape
        # Emission score of the gold tag at every position: (B, T).
        emit_scores = features.gather(dim=2, index=tags.unsqueeze(dim=-1)).squeeze(-1)
        # Prepend START so each position has a "previous" tag.
        start_tag = torch.full((B, 1), fill_value=self.start_idx, dtype=torch.long, device=self.device)
        tags = torch.cat([start_tag, tags], dim=1)
        # transition_matrix[to, from] for each consecutive tag pair.
        transition_scores = self.transition_matrix[tags[:, 1:], tags[:, :-1]]
        # Last real (unmasked) tag per sequence, then the transition to STOP.
        last_tag = tags.gather(dim=1, index=masks.sum(dim=1).long().unsqueeze(1)).squeeze(1)
        last_score = self.transition_matrix[self.stop_idx, last_tag]
        score = ((transition_scores + emit_scores) * masks).sum(dim=1) + last_score
        return score

    def viterbi(self, features: Tensor, masks: Tensor):
        """
        Decoding the tags with the Viterbi algorithm.

        Returns (best_score, best_paths) where best_paths is a list of tag-id
        lists, one per batch element, truncated to each sequence's length.
        """
        B, T, H = features.shape
        backpointers = torch.zeros(B, T, H, dtype=torch.long, device=self.device)
        max_score = torch.full((B, H), NOT_POSSIBLE_TRANSITION, device=self.device)
        # From start tag to any other tag
        max_score[:, self.start_idx] = 0
        # For every single timestep.
        for t in range(T):
            mask_t = masks[:, t].unsqueeze(1)
            emit_score_t = features[:, t]
            # (B, to, from): best predecessor per target tag.
            acc_score_t = max_score.unsqueeze(1) + self.transition_matrix
            acc_score_t, backpointers[:, t, :] = acc_score_t.max(dim=-1)
            acc_score_t += emit_score_t
            # Masked positions keep the previous score unchanged.
            max_score = acc_score_t * mask_t + max_score * (1 - mask_t)
        # Final transition into STOP.
        max_score += self.transition_matrix[self.stop_idx]
        best_score, best_tag = max_score.max(dim=-1)
        best_paths = []
        backpointers = backpointers.cpu().numpy()
        for b in range(B):
            best_tag_b = best_tag[b].item()
            seq_len = int(masks[b, :].sum().item())
            best_path = [best_tag_b]
            # Walk backpointers from the end of the real sequence.
            for bps_t in reversed(backpointers[b, :seq_len]):
                best_tag_b = bps_t[best_tag_b]
                best_path.append(best_tag_b)
            # Reverse and drop the START tag appended last.
            best_paths.append(best_path[-2::-1])
        return best_score, best_paths

    def forward_algorithm(self, features: Tensor, masks: Tensor):
        """Log-partition (forward) score over all tag paths, per batch element."""
        B, T, H = features.shape
        scores = torch.full((B, H), NOT_POSSIBLE_TRANSITION, device=self.device)
        scores[:, self.start_idx] = 0.0
        transition = self.transition_matrix.unsqueeze(0)
        for t in range(T):
            emit_score_t = features[:, t].unsqueeze(2)
            # (B, to, from) summed in log space, then reduced over `from`.
            score_t = scores.unsqueeze(1) + transition + emit_score_t
            score_t = log_sum_exp(score_t)
            mask_t = masks[:, t].unsqueeze(1)
            scores = score_t * mask_t + scores * (1 - mask_t)
        scores = log_sum_exp(scores + self.transition_matrix[self.stop_idx])
        return scores
|
# Python 2 script: prompt for a name, hours and hourly rate, then print
# the gross pay (hours * rate).
nam = raw_input('Who are you?')
print 'Welcome',nam
hrs = raw_input("Enter Hours:")
rate = raw_input("Enter Rate:")
# raw_input returns str; convert before arithmetic.
hrs = float(hrs)
rate = float(rate)
grosspay = hrs * rate
print 'Gross pay is',grosspay
14,384 | a5ab72f175cdee773d01d2776ad3c60fa130fdff | import argparse
import sys
from config import *
# from data_utils.extract_pe_features import *
from data_utils.bin_to_img import *
# from data_utils.extract_opcode import *
from data_utils.misc import *
from data_utils.data_loaders import *
from pathlib import Path
def main():
    """Run the dataset-preparation steps selected by the CLI flags.

    Reads the module-level ``args`` namespace created in the __main__ guard;
    paths come from the project ``config`` module.
    """
    max_files = 0  # set 0 to process all files or set a specific number
    # if args.extract_pe_features:
    #     extract_pe_features(ORG_DATASET_PE_FEATURES_CSV, ORG_DATASET_COUNT_PE_FEATURES_CSV, ORG_DATASET_PATH,
    #                         max_files=max_files)
    # completed modification for us
    if args.bin_to_img:
        # NOTE(review): dataset_root is computed but never used — confirm intent.
        dataset_root = Path(ORG_DATASET_ROOT_PATH).resolve()
        dataset = Path(ORG_DATASET_PATH).resolve()
        # Image widths to generate; the meaning of the 0/1 entries is defined
        # by convert_bin_to_img — presumably "auto"/degenerate widths, verify.
        list_of_widths = [0, 1, 64, 128, 256, 512, 1024]
        for width in list_of_widths:
            convert_bin_to_img(dataset.__str__(), width, max_files=max_files)
    if args.count_samples:
        count_dataset(ORG_DATASET_PATH, ORG_DATASET_COUNT_CSV)
        count_dataset(ORG_DATASET_OPCODES_PATH, ORG_DATASET_COUNT_OPCODES_PATH)
        count_dataset(get_image_datapath(image_dim=256), ORG_DATASET_COUNT_IMAGES_CSV)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process the Malware data')
    parser.add_argument('--bin_to_img', action='store_true', help='Generate image files from malware binaries',
                        default=False)
    parser.add_argument('--count_samples', action='store_true', help='Count all sample files for all experiments',
                        default=False)
    args = parser.parse_args()
    # Require at least one flag; otherwise show usage and exit non-zero.
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)
    main()
|
class RandomizedCollection:
    """Multiset with O(1) average insert, remove and uniform getRandom.

    Backed by a flat value list plus a map value -> set of its indices in
    the list; remove swaps the target with the last element so the list
    pop stays O(1).  (The original was an empty stub — every method
    implicitly returned None.)
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self._values = []    # flat storage of every stored element
        self._index_map = {} # value -> set of positions in _values

    def insert(self, val: int) -> bool:
        """
        Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
        """
        indices = self._index_map.setdefault(val, set())
        absent = not indices
        indices.add(len(self._values))
        self._values.append(val)
        return absent

    def remove(self, val: int) -> bool:
        """
        Removes a value from the collection. Returns true if the collection contained the specified element.
        """
        indices = self._index_map.get(val)
        if not indices:
            return False
        # Overwrite one occurrence of `val` with the last element, then pop.
        idx = indices.pop()
        last = self._values[-1]
        self._values[idx] = last
        last_indices = self._index_map.setdefault(last, set())
        last_indices.add(idx)
        last_indices.discard(len(self._values) - 1)
        self._values.pop()
        return True

    def getRandom(self) -> int:
        """
        Get a random element from the collection.
        """
        import random  # local import: this snippet has no module import block
        return random.choice(self._values)


# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
|
14,386 | 52315e3f85eb499dd1acbd44cc653c253f3a814a |
# Bitwise-AND the Unicode code points of '生' (life, 0x751F) and
# '死' (death, 0x6B7B): the result is 0x611B, the character '愛' (love).
print(chr(ord('生') & ord('死')))
14,387 | 0160b2df83ec9ae1faca1181eed5015225c66ae8 | #!/usr/bin/env python
import sys
def parse( filename ) :
    """Parse a DIMACS CNF file into a list of integer clauses.

    Comment ('c') and problem ('p') lines are skipped.  Each clause line is
    a whitespace-separated list of literals terminated by 0; the trailing 0
    sentinel is dropped.  Fixes: the file is now closed (with-statement),
    and the fragile ``line[:-2]`` slice — which corrupted clauses on files
    without a trailing newline — is replaced by tokenising the whole line.
    """
    clauses = []
    with open( filename ) as handle :
        for line in handle :
            if line.startswith( 'c' ) : continue
            if line.startswith( 'p' ) : continue
            clause = [ int(x) for x in line.split() ]
            if clause and clause[-1] == 0 :
                clause.pop()
            clauses.append( clause )
    return clauses
def bcp( formula, unit ) :
    """Boolean constraint propagation: assert literal *unit* true.

    Clauses containing *unit* are satisfied and dropped; the complementary
    literal is stripped from the remaining clauses.  Returns -1 when a
    clause becomes empty (conflict), otherwise the reduced formula.
    """
    reduced = []
    for clause in formula :
        if unit in clause :
            continue  # clause satisfied by the assignment
        if -unit not in clause :
            reduced.append( clause )
            continue
        stripped = [ literal for literal in clause if literal != -unit ]
        if not stripped :
            return -1  # conflict: clause emptied out
        reduced.append( stripped )
    return reduced
def get_counter( formula ) :
    """Count how many times each literal occurs in *formula*.

    Bug fix: the original initialised a literal's count to 0 on first
    sight, undercounting every literal by one.  (The counts are currently
    only used for their keys elsewhere, so the bug was latent.)
    """
    counter = {}
    for clause in formula :
        for literal in clause :
            counter[ literal ] = counter.get( literal, 0 ) + 1
    return counter
def pure_literal( formula, counter, assignment ) :
    """Assign every pure literal (one whose negation never occurs).

    Mutates *assignment* in place by appending the pure literals, and
    returns the formula after propagating each of them with bcp.
    """
    pures = [ literal for literal in counter if -literal not in counter ]
    for pure in pures :
        formula = bcp( formula, pure )
    assignment += pures
    return formula
def backtracking( formula, assignment ) :
    """DPLL search: pure-literal elimination, unit propagation, then split.

    Returns a satisfying assignment as a list of literals, or an empty
    list when this branch is unsatisfiable.
    """
    counter = get_counter( formula )
    formula = pure_literal( formula, counter, assignment )
    if not formula :
        return assignment
    # Unit propagation: repeatedly assert single-literal clauses.
    unit_clauses = [ c for c in formula if len( c ) == 1 ]
    while len( unit_clauses ) > 0 :
        unit = unit_clauses[ 0 ]
        formula = bcp( formula, unit[0] )
        assignment += [ unit[0] ]
        if formula == -1 :
            return []  # conflict during propagation
        if not formula :
            return assignment
        unit_clauses = [ c for c in formula if len( c ) == 1 ]
    # Branch on a literal — here simply the numerically largest counter key
    # (not a frequency-based heuristic) — and recurse on both polarities.
    variable = max( counter )
    solution = backtracking( bcp( formula, variable ), assignment + [variable] )
    if not solution :
        solution = backtracking( bcp( formula, -variable ), assignment + [-variable] )
    return solution
def main() :
    """Entry point (Python 2): solve the DIMACS file given as argv[1]."""
    clauses = parse( sys.argv[1] )
    solution = backtracking( clauses, [] )
    # Print the satisfying assignment as space-separated literals
    # (empty line when unsatisfiable).
    print ' '.join( [ str(x) for x in solution ] )

if __name__ == '__main__':
    main()
|
# Toy "health score" demo: score = (100 - age) + 2*apples - 2.8*cigarettes,
# followed by three equivalent calls (positional, indexed, and unpacked).
def Healt_calc(age, apples, cig):
    # NOTE(review): name has a typo (Healt -> Health); kept for compatibility.
    health = (100-age) + apples*2 - (cig*2.8)
    print(health)

Healt_calc(22,5,7)
venkat_data = [22,5,7]
Healt_calc(venkat_data[0],venkat_data[1],venkat_data[2])
Healt_calc(*venkat_data) # UNPACKING ARGUMENT: same call via iterable unpacking
14,389 | 7c8f06dfd55bc362e2eed1fed4bbeaad30fbe8c3 | #!/usr/bin/python
#-*- coding: utf-8 -*-
from oled.serial import i2c, spi
from oled.device import sh1106, ssd1306
from oled.render import canvas
import sys, subprocess, os
import requests
import time
import datetime
from PIL import ImageFont
import fonts
import thread
reload(sys)
sys.setdefaultencoding( 'utf-8' )
# Fetch the current temperature/humidity from the KMA (Korea Meteorological
# Administration) short-term forecast API for a fixed grid cell, Python 2.
now = datetime.datetime.now()
# Observations are published with a delay; before :35 use the previous hour.
if now.minute < 35:
    now = now - datetime.timedelta(hours=1)
ymd = str(now.year) + str('%02d' % now.month) + str('%02d' % now.day)
hour = str('%02d' % now.hour) + '00'
# KMA grid coordinates of the queried location ("home").
homex = '68'
homey = '100'
weatherUrlBase = 'http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastGrib'
# SECURITY(review): API service key hard-coded in source — move to config/env.
Servicekey = 'v5am6iSNaarfzPqtr%2FJQ7%2BUKWInoHUBn%2BH6MCS72%2BZPliNo9sC9tJxB0yxg%2B0KrvJNuTOqf3ZitqKPyXdcongQ%3D%3D'
url = weatherUrlBase + '?ServiceKey=' + Servicekey + '&base_date=' + ymd + '&base_time=' + hour + '&nx=' + homex + '&ny=' + homey + '&_type=json'
r = requests.get(url)
#print url
#print r.json()
# Index the response items by category code.
tmp = {}
for data in r.json()['response']['body']['items']['item']:
    tmp[data['category']] = data['obsrValue']
# T1H/REH: presumably temperature (deg C) and relative humidity per the
# KMA category codes — verify against the API docs.
tem = str(tmp['T1H'])
hum = str(tmp['REH'])
print u'온도:' + tem
print u'습도:' + hum
Mine = ImageFont.truetype('/home/pi/fonts/Minecraftia-Regular.ttf', 12)
gulim12 = ImageFont.truetype('/home/pi/fonts/NGULIM.TTF', 12)
def main():
    # Draw temperature/humidity once; then idle forever (the OLED keeps
    # showing the last rendered canvas).
    with canvas(device) as draw:
        draw.text((0, 0), u' 온도:' + tem, font = gulim12, fill = 'white')
        draw.text((0, 16), u' 습도:' + hum, font = gulim12, fill = 'white')
    while True:
        time.sleep(1)
if __name__ == "__main__":
    # Try the I2C-attached SH1106 display first, fall back to SPI;
    # exit non-zero if neither bus responds.
    try:
        device = sh1106(i2c(port=1, address=0x3c))
    except IOError:
        try:
            device = sh1106(spi(device=0, port=0))
        except IOError:
            sys.exit(1)
    main()
|
# Competitive-programming snippet: reads a count, then that many (a, b)
# pairs, accumulating an answer from each pair's overlap with the previous
# one.  The exact problem semantics are unknown from this file.
x = y = 0
ans = 1
for _ in range(int(input())):
    a, b = map(int, input().split())
    # max(0, ...) ignores non-overlapping ranges; the (x != y) term adds a
    # correction when the previous pair was unequal — problem-specific, verify.
    ans += max(0, min(a, b) - max(x, y) + (x != y))
    x, y = a, b
print(ans)
14,391 | aa009dc9f6f58521b00af83637cb2958d9f2d8c1 | #!/usr/bin/env python
#Copyright 2017 Martin Cooney
#This file is subject to the terms and conditions defined in file 'Readme.md', which is part of this source code package.
import cv2
import numpy as np
import sys
# Playback tool (Python 2): step through paired RGB and thermal recordings;
# 'f' prints the current frame number, 'w' dumps both frames as JPEGs,
# 'q' quits.  NOTE(review): loop-interior indentation reconstructed — the
# key handling is assumed to run every iteration, not only on valid frames.
basename = '../../data/objects/objects1'
if len(sys.argv) > 1:
    basename = sys.argv[1]
thermalFileName = "%s-thermal1.avi" % basename
rgbFileName = "%s-rgb.avi" % basename
cap=cv2.VideoCapture(rgbFileName)
cap_thermal=cv2.VideoCapture(thermalFileName)
currentFrame=0
cv2.namedWindow("RGB")
cv2.moveWindow("RGB",0,200)
cv2.namedWindow("Thermal")
cv2.moveWindow("Thermal",320,200)
print ""
print "--------------------------"
print "= Find frames (MAY 2017) ="
print "--------------------------"
print ""
print("Press f to output frame number, w to write a frame to image, and q to quit")
while(cap.isOpened() and cap_thermal.isOpened()):
    ret, frame = cap.read()
    ret, frame_thermal = cap_thermal.read()
    if not (frame is None or frame_thermal is None):
        currentFrame=currentFrame+1
        cv2.imshow('RGB', frame)
        cv2.imshow('Thermal', frame_thermal)
    key = cv2.waitKey(50)
    if key == ord('q'):
        break
    elif key == ord('f'): #output the current frame number to console
        print currentFrame
    elif key == ord('w'): #output the current frame as an image
        if not (frame is None or frame_thermal is None):
            print currentFrame
            filename = "../../data/captured_frames/cap%d_rgb.jpg" % currentFrame
            cv2.imwrite(filename, frame)
            filename = "../../data/captured_frames/cap%d_thermal.jpg" % currentFrame
            cv2.imwrite(filename, frame_thermal)
cap.release()
cap_thermal.release()
cv2.destroyAllWindows()
14,392 | 31d289009ef9956845343afe2a48825c5cf7d1e0 | import numpy as np
import os
import modeling.motion_model.motion_model as motion
import shutil
import math
import modeling.measurement_update as measurement
import modeling.resampling as resample
import config.set_parameters as sp
import utils.data_process as data_process
from utils.visualize import *
#from utils.visualize_proposer import *
import torch
import torch.utils
import torch.utils.data
import torch
from tensorboardX import SummaryWriter
class DPF:
    """Differentiable Particle Filter.

    Wires together a learned motion model, observation encoder, particle
    proposer and observation-likelihood estimator, plus per-component
    training loops and an end-to-end connect_modules pass.
    """

    def __init__(self, train_set=None, eval_set=None, means=None, stds=None, visualize=False,
                 state_step_sizes_=None, state_min=None, state_max=None):
        # Datasets and (pre-computed) normalisation statistics.
        self.train_set = train_set
        self.eval_set = eval_set
        self.means = means
        self.stds = stds
        self.state_step_sizes_ = state_step_sizes_
        # Per-dimension bounds of the state space, used for uniform sampling
        # and for rescaling proposer outputs.
        self.state_min = state_min
        self.state_max = state_max
        self.visualize = visualize
        # Learnable components of the filter.
        self.motion_model = motion.MotionModel()
        self.observation_encoder = measurement.ObservationEncoder()
        self.particle_proposer = measurement.ParticleProposer()
        self.likelihood_estimator = measurement.ObservationLikelihoodEstimator()
        self.resampling = resample.particle_resampling
        # Hyper-parameters from the project config.
        params = sp.Params()
        self.globalparam = params.globalparam
        self.trainparam = params.train
        self.testparam = params.test
        self.end2end = False
        # self.use_cuda = torch.cuda.is_available()
        self.use_cuda = False
        self.use_proposer = False
        self.log_freq = 10  # Steps
        self.test_freq = 2  # Epoch
    def particles_to_state(self, particle_list, particle_probs_list):
        """ Get predicted state from the particles
        Args:
            particle_list: Tensor with size (N, T, particle_num, 3), containing the particles at different time step
            particle_probs_list: Tensor with size (N, T, particle_num), corresponds to the particle probabilities
        Returns:
            Tensor with size (N, T, 3): weighted-mean position plus a mean
            orientation angle.
            NOTE(review): atan2 is given (sum cos, sum sin) — torch.atan2
            expects (sin, cos) order, so the angle may be the complement of
            the intended mean heading; confirm against the motion model.
        """
        # (N, T, P) -> (N, T, P, 1) so the weights broadcast over state dims.
        particle_probs_list = particle_probs_list.view(particle_probs_list.size(0), particle_probs_list.size(1),
                                                       particle_probs_list.size(2), 1)
        mean_position = torch.sum(particle_probs_list.repeat(1, 1, 1, 2)
                                  * particle_list[:, :, :, :2], 2)
        # Circular mean of the heading via weighted cos/sin sums.
        mean_orientation = torch.atan2(
            torch.sum(particle_probs_list * torch.cos(particle_list[:, :, :, 2:]), 2),
            torch.sum(particle_probs_list * torch.sin(particle_list[:, :, :, 2:]), 2))
        return torch.cat([mean_position, mean_orientation], 2)
def connect_modules(self, particle_num, sta, obs, act, motion_mode=0, phrase=None):
""" Connect all the modules together to form the whole DPF system
Args:
sta: Tensor with size (N, T, 3), states
obs: Tensor with size (N, T, 3, H, W), observations
act: Tensor with size (N, T, 3), actions
Returns:
particle_list: Tensor with size (N, T, particle_num, 3), particles at different time step
particle_probs_list: Tensor with size (N, T, particle_num),
the particle probabilities at different time step
"""
propose_ratio = self.globalparam['propose_ratio']
N = sta.shape[0]
encoding = self.observation_encoder(obs.reshape(-1, 24, 24, 3).permute(0,3,1,2))
# initialize particles
# initial particles: (30, 1000, 3)
if self.globalparam['init_with_true_state']:
# tracking with known initial state
initial_particles = sta[:, 0:1, :].repeat(1, particle_num, 1).float()
else:
# global localization
if self.globalparam['use_proposer']:
# propose particles from observations
initial_particles = self.propose_particle(encoding[0:N, ...], particle_num, self.state_min, self.state_max)
else:
# sample particles randomly
x = torch.empty(sta.size(0), particle_num, 1).uniform_(self.state_min[0], self.state_max[0])
y = torch.empty(sta.size(0), particle_num, 1).uniform_(self.state_min[1], self.state_max[1])
theta = torch.empty(sta.size(0), particle_num, 1).uniform_(self.state_min[2], self.state_max[2])
initial_particles = torch.cat((x, y, theta), -1)
# shape (30, 1000)
initial_particle_probs = torch.ones(sta.size(0), particle_num) / particle_num
particles = initial_particles
particle_probs = initial_particle_probs
particle_list = particles.view(particles.size(0), -1, particle_num, 3)
particle_probs_list = particle_probs.view(particles.size(0), -1, particle_num)
for t in range(1, sta.size(1)):
propose_num = int(particle_num * (propose_ratio ** (t+1)))
resample_num = particle_num - propose_num
if propose_ratio < 1.0:
# resample, shape (N, resample_num, 3)
particles = self.resampling(particles, particle_probs, resample_num)
# motion update
act_ = act.unsqueeze(2)
sta_ = sta.unsqueeze(2)
act_ = act_.repeat(1, 1, particle_num, 1)
sta_ = sta_.repeat(1, 1, particle_num, 1)
particles = self.motion_model(act_[:, t:t+1, :, :].view(-1, particle_num, 3).float(),
particles.float(),
sta_[:, t:t+1, :, :].view(-1, particle_num, 3).float(),
self.stds,
self.means,
self.state_step_sizes_,
motion_mode,
phrase)
# measurement update
# get shape (N, 1, resample_num)
particle_probs = self.get_likelihood(particles.double(), obs[:, t:t+1, :, :, :]).float()
# (N, resample_num)
particle_probs = particle_probs.squeeze()
if propose_ratio > 0:
proposed_particles = self.propose_particles(encoding[t*N:(t+1*N), ...],\
propose_num , self.state_min, self.state_max)
proposed_particle_probs = torch.ones([N, propose_num])
if propose_ratio == 1.0:
# all proposed particles
particles = proposed_particles
particle_probs = proposed_particle_probs
elif propose_ratio == 0:
# all standard particles
particles = particles
particle_probs = particle_probs
else:
particle_probs *= (resample_num / particle_num) / torch.sum(particle_probs, dim=1)
proposed_particle_probs *= (propose_num / particle_num) / torch.sum(proposed_particle_probs, dim=1)
particles = torch.cat([particles, proposed_particles], axis=1) # dimension is wrong!
particle_probs = torch.cat([particle_probs, proposed_particle_probs], axis=1) # dimension is wrong!
# normalize probabilities
particle_probs /= torch.sum(particle_probs, dim=1, keepdim=True)
particle_list = torch.cat((particle_list,
particles.view(particles.size(0), 1, particles.size(1), particles.size(2))),
1)
particle_probs_list = torch.cat((particle_probs_list,
particle_probs.view(particle_probs.size(0), 1,
particle_probs.size(1))),
1)
pred_state = self.particles_to_state(particle_list, particle_probs_list)
return particle_list, particle_probs_list, pred_state
    def train_motion_model(self, mode=0, phrase=None, dynamics_model_path=None):
        """ Train the motion model f and g.

        Args:
            mode: motion-model variant selector (0 or 1), forwarded to the model.
            phrase: sub-stage within mode 1 (0 appears to be a dynamics
                pre-train, 1 a second stage warm-started from a checkpoint).
                NOTE(review): likely a typo for "phase".
            dynamics_model_path: checkpoint loaded for mode 1 / phrase 1.
        :return:
        """
        batch_size = self.trainparam['batch_size']
        epochs = self.trainparam['epochs']
        lr = self.trainparam['learning_rate']
        particle_num = self.trainparam['particle_num']
        state_step_sizes = self.state_step_sizes_
        motion_model = self.motion_model
        if mode == 1 and phrase == 1:
            motion_model.load_state_dict(torch.load(dynamics_model_path))
        # Fresh log/checkpoint directories per run (previous contents removed).
        log_dir = 'log/motion_model/mode_{}/'.format(mode)
        if os.path.exists(log_dir):
            shutil.rmtree(log_dir)
        os.makedirs(log_dir)
        writer = SummaryWriter(log_dir)
        save_dir = 'model/motion_model/mode_{}/'.format(mode)
        if os.path.exists(save_dir):
            shutil.rmtree(save_dir)
        os.makedirs(save_dir)
        if self.use_cuda:
            motion_model = motion_model.cuda()
        train_loader = torch.utils.data.DataLoader(
            self.train_set,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.globalparam['workers'],
            pin_memory=True,
            sampler=None)
        # NOTE(review): val_loader is built but never used in this method.
        val_loader = torch.utils.data.DataLoader(
            self.eval_set,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.globalparam['workers'],
            pin_memory=True)
        optimizer = torch.optim.Adam(motion_model.parameters(), lr)
        niter = 0
        for epoch in range(epochs):
            motion_model.train()
            for iteration, (sta, obs, act) in enumerate(train_loader):
                # Build ground truth inputs of size (batch_size, num_particles, 3)
                #
                # -actions    action at current time step
                # -particles: true state at previous time step
                # -states:    true state at current time step
                # Shape: (batch_size, seq_len, 1, 3)
                act = act.unsqueeze(2)
                sta = sta.unsqueeze(2)
                # Shape: (batch_size, seq_len, num_particle, 3)
                actions = act.repeat(1, 1, particle_num, 1).float()
                states = sta.repeat(1, 1, particle_num, 1).float()
                # Shape: (batch_size*(seq_len-1), num_particle, 3)
                actions = actions[:, 1:, :, :].contiguous().view(-1, particle_num, act.size(3))
                particles = states[:, :-1, :, :].contiguous().view(-1, particle_num, sta.size(3))
                states = states[:, 1:, :, :].contiguous().view(-1, particle_num, sta.size(3))
                if self.use_cuda:
                    actions = actions.cuda()
                    particles = particles.cuda()
                    states = states.cuda()
                # Feedforward and compute loss
                moved_particles = motion_model(actions,
                                               particles,
                                               states,
                                               self.stds,
                                               self.means,
                                               state_step_sizes,
                                               mode,
                                               phrase)
                # The loss is computed inside the motion model's forward pass.
                loss = motion_model.loss
                # compute gradient and do SGD step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                niter += 1
                if niter % self.log_freq == 0:
                    print("Epoch:{}, Iteration:{}, loss:{}".format(epoch, niter, loss))
                    writer.add_scalar('train/loss', loss, niter)
                # Periodic numbered checkpoints.
                if niter % 1000 == 0:
                    if mode == 1 and phrase == 0:
                        torch.save(motion_model.state_dict(), save_dir+'dynamic_model_' + repr(niter) + '.pth')
                    else:
                        torch.save(motion_model.state_dict(), save_dir+'motion_model_' + repr(niter) + '.pth')
        # Final checkpoint after all epochs.
        if mode == 1 and phrase == 0:
            torch.save(motion_model.state_dict(), save_dir+'dynamic_model.pth')
        else:
            torch.save(motion_model.state_dict(), save_dir+'motion_model.pth')
    def train_particle_proposer(self):
        """ Train the particle proposer k.

        Loads a frozen, pre-trained observation encoder from "encoder.pth"
        and fits the proposer with a soft nearest-neighbour (MLE) loss on
        proposed-particle-to-true-state distances.
        NOTE(review): the train/eval loops call .cuda() unconditionally, so
        this path requires a GPU regardless of self.use_cuda — confirm.
        :return:
        """
        batch_size = self.trainparam['batch_size']
        # epochs = self.trainparam['epochs']
        epochs = 500
        lr = self.trainparam['learning_rate']
        particle_num = self.trainparam['particle_num']
        std = 0.2  # bandwidth of the Gaussian turning distances into probs
        encoder_checkpoint = "encoder.pth"
        log_dir = 'particle_proposer_log'
        if os.path.exists(log_dir):
            shutil.rmtree(log_dir)
        log_writer = SummaryWriter(log_dir)
        check_point_dir = 'particle_proposer_checkpoint'
        if not os.path.exists(check_point_dir):
            os.makedirs(check_point_dir)
        optimizer = torch.optim.Adam(self.particle_proposer.parameters(), lr)
        # use trained Observation encoder to get encodings of observation
        if os.path.isfile(encoder_checkpoint):
            checkpoint = torch.load(encoder_checkpoint)
            self.observation_encoder.load_state_dict(checkpoint)
            print("Check point loaded!")
        else:
            print("Invalid check point directory...")
        # freeze observation encoder
        self.observation_encoder.eval()
        if self.use_cuda:
            self.observation_encoder.cuda()
            self.particle_proposer.cuda()
        train_loader = torch.utils.data.DataLoader(
            self.train_set,
            batch_size=batch_size,
            shuffle=True,
            num_workers=self.globalparam['workers'],
            pin_memory=True,
            sampler=None)
        val_loader = torch.utils.data.DataLoader(
            self.eval_set,
            batch_size=batch_size,
            shuffle=True,
            num_workers=self.globalparam['workers'],
            pin_memory=True)
        niter = 0
        propose_num = 100
        for epoch in range(epochs):
            self.particle_proposer.train()
            for i, (sta, obs, act) in enumerate(train_loader):
                obs = obs.cuda().reshape(-1, 24, 24, 3).permute(0,3,1,2).float()
                sta = sta.cuda()
                encoding = self.observation_encoder(obs)
                new_particles = self.propose_particle(encoding, \
                        propose_num, self.state_min, self.state_max)
                dists = data_process.square_distance_proposer(sta, new_particles, propose_num, self.state_step_sizes_)
                # Transform distances to probabilities sampled from a normal distribution
                dist_probs = (1 / float(new_particles.size(0))) / ((2 * np.pi * std ** 2)**0.5) * torch.exp(-dists / (2.0 * std ** 2))
                # Add e for numerical stability
                e = 1e-16
                # Compute most likelihood estimate loss
                mle_loss = torch.mean(-torch.log(e + torch.sum(dist_probs, dim=-1)))
                if niter % 100 == 0:
                    print('Epoch {}/{}, Batch {}/{}: Train loss: {}'.format(epoch, epochs, i, len(train_loader), mle_loss.item()))
                    log_writer.add_scalar('train/loss', mle_loss.item(), niter)
                optimizer.zero_grad()
                mle_loss.backward()
                optimizer.step()
                niter += 1
            # Visualize proposals for the first sample of the last batch.
            eval_encoding = self.observation_encoder(obs[0,...].unsqueeze(0))
            eval_particles = self.propose_particle(eval_encoding, 1000, self.state_min, self.state_max)
            plot_proposer('nav01', "./output/train_" + str(epoch),\
                    eval_particles[:1000, :].cpu().detach().numpy(), sta[0,0,...].cpu().numpy())
            val_loss = self.eval_particle_proposer(val_loader, epoch)
            print('Epoch {}/{}: Validation loss: {}'.format(epoch, epochs, val_loss))
            log_writer.add_scalar('val/loss', val_loss, epoch)
            # Periodic checkpoint every third epoch.
            if epoch % 3 == 0:
                save_path = os.path.join(
                    check_point_dir, 'proposer_checkpoint_{}.pth'.format(epoch))
                torch.save(self.particle_proposer.state_dict(), save_path)
                print('Saved proposer to {}'.format(save_path))
    def eval_particle_proposer(self, val_loader, epoch):
        """ Eval the particle proposer
        Args:
            val_loader: Dataloader of val dataset
            epoch: epoch number, used only to name the output plot
        Return:
            val_loss: mean MLE loss over the validation batches
        """
        std = 0.2  # same Gaussian bandwidth as in training
        mle_loss_total = 0.0
        niter = 0
        sta_eval = None
        particles_eval = None
        for i, (sta, obs, act) in enumerate(val_loader):
            obs = obs.cuda().reshape(-1, 24, 24, 3).permute(0,3,1,2).float()
            sta = sta.cuda()
            encoding = self.observation_encoder(obs)
            new_particles = self.propose_particle(encoding, \
                    1000, self.state_min, self.state_max)
            dists = data_process.square_distance_proposer(sta.unsqueeze(0), new_particles, 1000, self.state_step_sizes_)
            # Transform distances to probabilities sampled from a normal distribution
            dist_probs = (1 / float(new_particles.size(0))) / ((2 * np.pi * std ** 2)**0.5) * torch.exp(-dists / (2.0 * std ** 2))
            # Add e for numerical stability
            e = 1e-16
            # Compute most likelihood estimate loss
            mle_loss = torch.mean(-torch.log(e + torch.sum(dist_probs, dim=-1)))
            mle_loss_total += mle_loss.item()
            niter += 1
            # Keep the last batch around for the plot below.
            sta_eval = sta
            particles_eval = new_particles
        plot_proposer('nav01', "./output/eval_" + str(epoch),\
                particles_eval[:1000,...].cpu().detach().numpy(), sta_eval[0,0,...].cpu().numpy())
        return mle_loss_total / niter
def propose_particle(self, encoding, num_particles, state_mins, state_maxs):
"""
Args:
encoding: output of observation encoder tensor shape: (128, )
num_particles: number of particles
state_mins: minimum values of states, numpy array of shape (1, 2)
state_maxs: maximum values of states, numpy array of shape (1, 2)
Returns:
proposed_particles: tensor of new proposed states: (N, )
"""
encoding_rep = []
for i in range(encoding.shape[0]):
encoding_rep.append((encoding[i,:].unsqueeze(0)).repeat(num_particles, 1))
encoding_rep = torch.cat(encoding_rep, dim=0)
proposed_particles = self.particle_proposer(encoding_rep)
# transform states 4 dim to 3 dim
x = proposed_particles[:, 0] * \
(state_maxs[0] - state_mins[0]) / 2.0 + (state_maxs[0] + state_mins[0]) / 2.0
y = proposed_particles[:, 1] * \
(state_maxs[1] - state_mins[1]) / 2.0 + (state_maxs[1] + state_mins[1]) / 2.0
theta = torch.atan2(proposed_particles[:, 2], proposed_particles[:, 3])
proposed_particles = torch.cat((x.unsqueeze(1), y.unsqueeze(1),\
theta.unsqueeze(1)), 1)
return proposed_particles
    def train_likelihood_estimator(self):
        """ Train the observation likelihood estimator l (and h)

        Trains the encoder and estimator jointly with a contrastive-style
        objective: matched (state, observation) pairs — the diagonal of the
        likelihood matrix — are pushed towards 1, mismatched pairs towards 0.
        :return:
        """
        batch_size = self.trainparam['batch_size']
        epochs = self.trainparam['epochs']
        lr = self.trainparam['learning_rate']
        self.observation_encoder = self.observation_encoder.double()
        self.likelihood_estimator = self.likelihood_estimator.double()
        if self.use_cuda:
            self.observation_encoder = self.observation_encoder.cuda()
            self.likelihood_estimator = self.likelihood_estimator.cuda()
        train_loader = torch.utils.data.DataLoader(
            self.train_set,
            batch_size=batch_size,
            shuffle=True,
            num_workers=self.globalparam['workers'],
            pin_memory=True,
            sampler=None)
        val_loader = torch.utils.data.DataLoader(
            self.eval_set,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.globalparam['workers'],
            pin_memory=True)
        # Both modules are optimised jointly.
        optimizer = torch.optim.Adam(list(self.observation_encoder.parameters())+
                                     list(self.likelihood_estimator.parameters()), lr)
        log_dir = 'likelihood_estimator_log'
        if os.path.exists(log_dir):
            shutil.rmtree(log_dir)
        log_writer = SummaryWriter(log_dir)
        check_point_dir = 'likelihood_estimator_checkpoint'
        if not os.path.exists(check_point_dir):
            os.makedirs(check_point_dir)
        niter = 0
        for epoch in range(epochs):
            self.observation_encoder.train()
            self.likelihood_estimator.train()
            for batch_id, (sta, obs, act) in enumerate(train_loader):
                if self.use_cuda:
                    sta = sta.cuda()
                    obs = obs.cuda()
                w = self.get_likelihood(sta, obs)
                # define loss (correct -> 1, incorrect -> 0) and optimizer
                correct_item = 0
                incorrect_item = 0
                for batch_ind in range(w.size()[0]):
                    # Diagonal = matched pairs; off-diagonal = mismatched pairs.
                    correct_samples = torch.diag(w[batch_ind])
                    incorrect_samples = w[batch_ind] - torch.diag(torch.diag(w[batch_ind]))
                    correct_item += torch.sum(-torch.log(correct_samples))
                    incorrect_item += torch.sum(-torch.log(1.0 - incorrect_samples))
                loss = correct_item / w.size()[0] + incorrect_item / (w.size()[0]*(w.size()[0]-1))
                # log and visualize
                if niter % self.log_freq == 0:
                    print('Epoch {}/{}, Batch {}/{}: Train loss: {}'.format(epoch, epochs, batch_id, len(train_loader), loss))
                    log_writer.add_scalar('train/loss', loss, niter)
                # compute gradient and do SGD step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                niter += 1
                # visualize the output of the model
                # NOTE(review): the loop plots w[batch_id] for every i — likely
                # intended to plot w[i]; verify.
                if self.visualize and epoch % 10 == 0:
                    w = w.data.cpu().numpy()
                    for i in range(w.shape[0]):
                        plot_measurement(w[batch_id], save_image=True,
                                         outdir='train_vis/measurement/epoch-{}'.format(epoch),
                                         batch=batch_id, ind=i)
            if epoch % self.test_freq == 0:
                likelihood = self.eval_likelihood_estimator(val_loader)
                print('Epoch {}: Val likelihood: {}'.format(epoch, likelihood))
                log_writer.add_scalar('val/likelihood', likelihood, niter)
            if epoch % 10 == 0:
                save_name1 = os.path.join(
                    check_point_dir, 'encoder_checkpoint_{}.pth'.format(epoch))
                save_name2 = os.path.join(
                    check_point_dir, 'estimator_checkpoint_{}.pth'.format(epoch))
                torch.save(self.observation_encoder.state_dict(), save_name1)
                print('Saved encoder to {}'.format(save_name1))
                torch.save(self.likelihood_estimator.state_dict(), save_name2)
                print('Saved estimator to {}'.format(save_name2))
    def get_likelihood(self, sta, obs):
        """ Process the data input and get the model output
        Args:
            sta: Tensor with size (N, sta_num, 3), states
            obs: Tensor with size (N, obs_num, H, W, 3), observations
                 (permuted to channels-first internally; H = W = 24)
        Returns:
            w: Tensor with size (N, obs_num, sta_num).
            The diagonal entries are likelihood of observations at their states.
            Other entries are likelihood of observations not at their states.
        """
        # obs (32, obs_num, 24, 24, 3) -> channels-first (32*obs_num, 3, 24, 24)
        o = obs.permute(0, 1, 4, 2, 3)
        o = o.view(-1, 3, 24, 24)
        e = self.observation_encoder(o)
        # get e (32*obs_num, 128)
        # get all the combinations of states and observations
        # -> (32, obs_num, 128)
        e = e.view(obs.size()[0], obs.size()[1], -1)
        # -> (32, obs_num, sta_num, 128)
        e = e.view(obs.size()[0], obs.size()[1], 1, e.size()[2]).repeat(1, 1, sta.size()[1], 1)
        # sta (32, sta_num, 3) -> (32, sta_num, 4): normalised x/y plus the
        # heading encoded as (cos, sin).
        s = torch.cat(((sta[:, :, :2] - torch.from_numpy(self.means['s'])[:2]) / torch.from_numpy(self.stds['s'])[:2],
                       torch.cos(sta[:, :, 2:3]), torch.sin(sta[:, :, 2:3])), -1)
        # -> (32, obs_num, sta_num, 4)
        s = s.view(s.size()[0], 1, s.size()[1], s.size()[2]).repeat(1, obs.shape[1], 1, 1)
        # get all the combinations of states and observations
        # cat_input (32, obs_num, sta_num, 132)
        cat_input = torch.cat((e, s), -1)
        # -> (32*obs_num*sta_num, 132)
        cat_input = cat_input.view(-1, cat_input.size()[-1])
        # get w (32*obs_num*sta_num, 1)
        w = self.likelihood_estimator(cat_input)
        # -> (32, obs_num, sta_num)
        w = w.view(sta.size()[0], obs.size()[1], sta.size()[1])
        return w
    def eval_likelihood_estimator(self, val_loader):
        """ Eval the observation encoder and likelihood estimator
        Args:
            val_loader: Dataloader of val dataset
        Return:
            likelihood: mean (exponentiated) log-likelihood over batches,
            rewarding high diagonal and low off-diagonal entries of w.
        """
        likelihood_list = []
        self.observation_encoder.eval()
        self.likelihood_estimator.eval()
        for i, (sta, obs, act) in enumerate(val_loader):
            if self.use_cuda:
                sta = sta.cuda()
                obs = obs.cuda()
            w = self.get_likelihood(sta, obs)
            # calculate the likelihood
            correct_item = 0
            incorrect_item = 0
            for batch_ind in range(w.size()[0]):
                correct_samples = torch.diag(w[batch_ind])
                incorrect_samples = w[batch_ind] - torch.diag(torch.diag(w[batch_ind]))
                correct_item += torch.sum(torch.log(correct_samples))
                incorrect_item += torch.sum(torch.log(1.0 - incorrect_samples))
            likelihood = correct_item / w.size()[0] + incorrect_item / (w.size()[0] * (w.size()[0] - 1))
            likelihood_list.append(math.exp(likelihood))
            # visualize the output of the model
            # NOTE(review): plots w[i] while iterating j — likely intended
            # to plot w[j]; verify.
            if self.visualize:
                w = w.data.cpu().numpy()
                for j in range(w.shape[0]):
                    plot_measurement(w[i], save_image=True,
                                     outdir='eval_vis/measurement',
                                     batch=i, ind=j)
        likelihood = sum(likelihood_list) / len(likelihood_list)
        return likelihood
|
# JavaScript snippet executed in a Desmos calculator context: recolors every
# expression in the current state using a `colors_array` defined by the caller.
coloring_script = '''
state = Calc.getState()
var item;
for (item = 0; item < colors_array.length; item++) {
    state["expressions"]["list"][item].color = colors_array[item]
}
Calc.setState(state)
'''
14,394 | 190e8aef5922de0824232e9046094696a68c9211 | """
Gather F5 LTM Node Information
@author: David Petzel
@date: 11/15/2011
"""
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap, GetMap
from Products.DataCollector.plugins.DataMaps import ObjectMap
import re
from ZenPacks.community.f5.lib.BigIpUtils import unpack_address_to_string
from ZenPacks.community.f5.lib.BigIpUtils import avail_status_values, enable_state_values
class BigipLtmNodeMap(SnmpPlugin):
    """
    Handles the modeling of Nodes on the LTM.

    Custom Properties Added:
    zF5BigipNodesNameFilter - A regex string to compare the node name
        against. Only items that match will be returned. When left blank
        all nodes will be returned.
    """
    relname = "LtmNodes"
    modname = "ZenPacks.community.f5.BigipLtmNode"
    deviceProperties = SnmpPlugin.deviceProperties + ('zF5BigipNodesNameFilter',)

    # Column dictionaries map the OID ending (appended to the base OID
    # given in snmpGetTableMaps) to the data point it populates.
    basecolumns = {
        '.1.2': 'ltmNodeAddrAddr',
        '.1.12': 'ltmNodeAddrScreenName',
    }

    # The node status is provided by a separate table
    status_columns = {
        '.1.3': 'ltmNodeAddrStatusAvailState',
        '.1.4': 'ltmNodeAddrStatusEnabledState',
        '.1.6': 'ltmNodeAddrStatusDetailReason',
    }

    snmpGetTableMaps = (
        # Node address and node status tables
        GetTableMap('ltmNodeAddrTable', '.1.3.6.1.4.1.3375.2.2.4.1.2', basecolumns),
        GetTableMap('ltmNodeStatusTable', '.1.3.6.1.4.1.3375.2.2.4.3.2', status_columns)
    )

    def condition(self, device, log):
        """
        Always model nodes for this plugin (filtering happens per-node in
        process() via zF5BigipNodesNameFilter).
        """
        return True

    def process(self, device, results, log):
        """
        Process the fetched SNMP results into an LtmNodes relationship map.

        Merges the status table into the address table, applies the
        optional name filter, unpacks the packed-hex node address, and
        translates numeric status codes into readable strings.
        """
        log.info('processing %s for device %s', self.name(), device.id)
        getdata, tabledata = results
        ltmnode_table = tabledata.get("ltmNodeAddrTable")
        # Grab the status table and merge its columns into the address rows
        status_table = tabledata.get("ltmNodeStatusTable")
        for oid, data in status_table.items():
            for key, value in data.items():
                if key not in ltmnode_table[oid]:
                    ltmnode_table[oid][key] = value
        rm = self.relMap()
        # Get the regex used to restrict which nodes are modeled
        node_name_filter = getattr(device, 'zF5BigipNodesNameFilter', None)
        log.debug("Picked up Filter List of: %s", node_name_filter)
        for oid, data in ltmnode_table.items():
            om = self.objectMap(data)
            # An empty or missing filter keeps every node; otherwise the
            # screen name must match the supplied regex.
            if node_name_filter and \
                    re.search(node_name_filter, om.ltmNodeAddrScreenName) is None:
                continue
            # The fetched value is a packed hex representation of the IP.
            # Try to unpack the address, and check if route_domains
            # are in use.
            address, route_domain = unpack_address_to_string(oid,
                                                om.ltmNodeAddrAddr)
            if address != "":
                om.ltmNodeAddrAddr = address
            if route_domain != "":
                om.ltmNodeAddrRouteDomain = route_domain
            om.id = self.prepId(om.ltmNodeAddrAddr)
            om.snmpindex = oid
            # Translate numeric SNMP status codes to readable strings
            om.ltmNodeAddrStatusEnabledState = \
                enable_state_values[om.ltmNodeAddrStatusEnabledState]
            om.ltmNodeAddrStatusAvailState = \
                avail_status_values[om.ltmNodeAddrStatusAvailState]
            rm.append(om)
        log.debug(rm)
        return [rm]
|
14,395 | 0867ff00827365fe0cde59b3f26653494f6d4ba5 | #!/usr/bin/env python
'''
@author Luke Campbell
@file flask_mvc/view.py
@license Apache 2.0
'''
class FlaskView(object):
    """Placeholder base class for views in the flask_mvc package."""
|
14,396 | 87816aeef1acc9c84e9e80aa020dbf6595822dad | import numpy as np
import pytest
from cortexpy.edge_set import EdgeSet
class TestIsEdge(object):
    """Tests for EdgeSet.is_edge."""

    def test_with_all_true(self):
        """Every edge letter is reported present for an all-ones edge set."""
        edge_set = EdgeSet(np.ones(8))
        assert all(edge_set.is_edge(base) for base in 'acgtACGT')

    def test_with_none_true(self):
        """No edge letter is reported present for an all-zeros edge set."""
        edge_set = EdgeSet(np.zeros(8))
        assert not any(edge_set.is_edge(base) for base in 'acgtACGT')
class TestAddEdge(object):
    """Tests for EdgeSet.add_edge."""

    def test_adds_each_edge(self):
        """Adding a missing edge makes is_edge report it as present."""
        edge_set = EdgeSet(np.zeros(8))
        for base in 'acgtACGT':
            assert not edge_set.is_edge(base)
            edge_set.add_edge(base)
            assert edge_set.is_edge(base)
class TestRemoveEdge(object):
    """Tests for EdgeSet.remove_edge."""

    def test_removes_each_edge(self):
        """Removing a present edge makes is_edge report it as absent."""
        edge_set = EdgeSet(np.ones(8))
        for base in 'acgtACGT':
            assert edge_set.is_edge(base)
            edge_set.remove_edge(base)
            assert not edge_set.is_edge(base)
class TestGetitem(object):
    """Tests for EdgeSet.__getitem__."""

    def test_works(self):
        """Indexing an all-ones edge set yields a truthy value at each slot."""
        edge_set = EdgeSet(np.ones(8))
        assert all(edge_set[slot] for slot in range(8))
class TestIncomingOutgoingEdges(object):
    """Tests for the incoming/outgoing edge views."""

    def test_with_all_incoming_and_no_outgiong(self):
        """The first four slots feed .incoming, the last four .outgoing."""
        edge_set = EdgeSet(np.concatenate([np.ones(4), np.zeros(4)]))
        assert [1, 1, 1, 1] == list(edge_set.incoming)
        assert [0, 0, 0, 0] == list(edge_set.outgoing)
class TestIncomingOutgoingKmers(object):
    """Tests for the kmer-producing accessors of EdgeSet."""

    def test_no_incoming_or_outgoing(self):
        """An empty edge set yields no neighbours in either direction."""
        empty = EdgeSet(np.zeros(8))
        assert len(empty.get_incoming_kmers('AAA')) == 0
        assert len(empty.get_outgoing_kmers('AAA')) == 0

    def test_all_incoming_no_outgoing(self):
        """Only the incoming half set: four predecessors, no successors."""
        edge_set = EdgeSet(np.concatenate([np.ones(4), np.zeros(4)]))
        assert ['AAA', 'CAA', 'GAA', 'TAA'] == edge_set.get_incoming_kmers('AAA')
        assert len(edge_set.get_outgoing_kmers('AAA')) == 0

    def test_no_incoming_all_outgoing(self):
        """Only the outgoing half set: no predecessors, four successors."""
        edge_set = EdgeSet(np.concatenate([np.zeros(4), np.ones(4)]))
        assert len(edge_set.get_incoming_kmers('AAA')) == 0
        assert set(edge_set.get_outgoing_kmers('AAA')) == {'AAA', 'AAC', 'AAG', 'AAT'}

    def test_incoming_returns_lexicographically_lowest_kmers(self):
        """get_incoming_kmers canonicalises to the lexicographically lowest form."""
        edge_set = EdgeSet(np.zeros(8))
        edge_set.add_edge('t')
        assert edge_set.get_incoming_kmers('TAA') == ['TAA']

    def test_incoming_strings_does_not_return_lexicographically_lowest_kmers(self):
        """get_incoming_kmer_strings keeps the literal (non-canonical) kmer."""
        edge_set = EdgeSet(np.zeros(8))
        edge_set.add_edge('t')
        assert list(edge_set.get_incoming_kmer_strings('TAA')) == ['TTA']

    def test_outgoing_returns_lexicographically_lowest_kmers(self):
        """get_outgoing_kmers canonicalises to the lexicographically lowest form."""
        edge_set = EdgeSet(np.zeros(8))
        edge_set.add_edge('G')
        assert edge_set.get_outgoing_kmers('ACG') == ['CCG']

    def test_outgoing_strings_does_not_return_lexicographically_lowest_kmer(self):
        """get_outgoing_kmer_strings keeps the literal (non-canonical) kmer."""
        edge_set = EdgeSet(np.zeros(8))
        edge_set.add_edge('G')
        assert list(edge_set.get_outgoing_kmer_strings('ACG')) == ['CGG']

    def test_raises_on_non_lexlo_kmer(self):
        """Both kmer accessors reject a kmer that is not in canonical form."""
        edge_set = EdgeSet(np.zeros(8))
        for accessor in (edge_set.get_outgoing_kmers, edge_set.get_incoming_kmers):
            with pytest.raises(AssertionError):
                accessor('TTT')
class TestStr(object):
    """Tests for EdgeSet.to_str."""

    def test_empty_kmer(self):
        """An empty edge set renders as eight dots in both orientations."""
        edge_set = EdgeSet(np.zeros(8))
        assert edge_set.to_str(as_revcomp=True) == '........'
        assert edge_set.to_str(as_revcomp=False) == '........'

    def test_with_a_and_c(self):
        """'A' out and 'c' in render in place and flip under revcomp."""
        edge_set = EdgeSet(np.zeros(8))
        for base in ('A', 'c'):
            edge_set.add_edge(base)
        assert edge_set.to_str() == '.c..A...'
        assert edge_set.to_str(as_revcomp=True) == '...T..g.'
|
14,397 | 23362a18e726371576598d83eb2528cc10bb0c6a | for i, j in zip(range(3), range(3)):
print(i, j)
|
14,398 | 4cf557244be414b8cb84d163cc5d895f64708057 | # -*- coding: UTF-8 -*-
from .. import mylog
from ..myexception import SpiderOneUserException
from ..myexception import RequestException
from ..myexception import GetBookNumberException
from ..myexception import GetBookPageException
import requests
from string import Template
from bs4 import BeautifulSoup
from time import sleep
import random
import os
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0',
'Referer': 'https://www.douban.com/'}
someone_collect_url = Template('https://book.douban.com/people/${userid}/collect')
someone_collect_page_url = Template(
"https://book.douban.com/people/${userid}/collect?start=${num}&sort=time&rating=all&filter=all&mode=grid")
log = mylog.get_logger('spider_book_rating', logger_name='spider_book_rating')
book_rating_log = mylog.get_logger('book_rating', logger_name='book_rating', mode='a', formater='%(message)s')
user_book_number_log = mylog.get_logger('book_number', logger_name='book_number', mode='a', formater='%(message)s')
def spider_one_user_book_rating(userid):
    '''
    Crawl the book-rating data of Douban user ``userid``.

    Fetches the user's "collect" page, logs how many books the user has
    read, then walks every page of the collection and logs each rating.
    Any failure is wrapped in a plain ``Exception`` and re-raised for the
    caller (``spider_all_users``) to log and skip this user.

    :param userid: Douban user id as it appears in the profile URL
    '''
    url = someone_collect_url.substitute(userid=userid)
    page_html = ''
    try:
        page_html = get_pagehtml(url)
    except Exception, e:
        raise Exception(e)
    book_number = 0
    try:
        book_number = get_userid_number_of_book(userid, page_html)
        # Log message reads: "<userid> has read <book_number> books"
        log.info("%s 读过 %s 本书", userid, book_number)
        user_book_number_log.info("%s, %s", userid, book_number)
    except Exception, e:
        raise Exception(e)
    try:
        spider_one_user_read_books(userid, book_number)
    except Exception, e:
        raise Exception(e)
def get_pagehtml(url):
    '''
    Fetch ``url`` with the module-level browser headers and return the
    response body as text.

    :param url: page URL to fetch
    :return: HTML text of the page
    :raises RequestException: wraps any ``requests`` failure, tagged with
        the offending URL, after logging it
    '''
    try:
        r = requests.get(url, headers=headers)
        return r.text
    except Exception, e:
        reason = url+','+str(e)
        request_exception = RequestException(reason)
        log.error(request_exception)
        raise request_exception
def get_userid_number_of_book(userid, page_html):
    '''
    Return how many books ``userid`` has read, parsed from ``page_html``.

    :param userid: Douban user id (used only to tag error messages)
    :param page_html: HTML of the user's "collect" page
    :raises GetBookNumberException: if the count cannot be parsed; the
        reason is prefixed with the user id and logged before re-raising
    '''
    try:
        page_number = get_number_of_book(page_html)
        return page_number
    except Exception, e:
        reason = str(userid)+' '+str(e)
        get_book_number_exception = GetBookNumberException(reason)
        log.error(get_book_number_exception)
        raise get_book_number_exception
def get_number_of_book(page_html):
    '''
    Parse the total number of books from a Douban "collect" page.

    Looks for the ``<span class="subject-num">`` tag whose text has the
    form ``1-15 / 158`` and returns the number after the slash; returns
    0 when the tag is absent.

    :param page_html: HTML of the user's "collect" page
    :return: total number of books as an int
    :raises GetBookPageException: if the page cannot be parsed
    '''
    index_soup = BeautifulSoup(page_html, 'lxml')
    # Find the tag stating how many books were read
    try:
        subject_num_tag = index_soup.find(
            lambda tag:
            cmp(tag.name, 'span') == 0  # a <span> tag
            and tag.has_attr('class')  # with a class attribute
            and cmp(tag['class'][0], 'subject-num') == 0)  # class == subject-num
    except Exception, e:
        # Runtime message kept as-is: "cannot parse how many books were read"
        reason = '无法解析读了多少本书 '+str(e)
        get_book_number_exception = GetBookNumberException(reason)
        raise get_book_number_exception
    if subject_num_tag is not None:
        # Tag text looks like: 1-15 / 158
        # The total is the number after the slash
        num_of_books = int(subject_num_tag.string.split('/')[1].strip())
        return num_of_books
    else:
        return 0
def spider_one_user_read_books(userid, book_number):
    '''
    Page through ``userid``'s collection (15 books per page) and log
    every rated book; a failed page fetch is logged as a
    GetBookPageException and that page is skipped.

    :param userid: Douban user id
    :param book_number: total number of books, used to compute paging
    '''
    num_of_each_page = 15  # number of books fetched per page
    for num in range(0, book_number, num_of_each_page):
        # Throttle between page fetches to avoid being blocked
        random_sleep()
        url = someone_collect_page_url.substitute(userid=userid, num=num)
        page_html = ''
        try:
            page_html = get_pagehtml(url)
        except Exception, e:
            reason = str(userid)+' '+url+' '+str(e)
            get_book_page_exception = GetBookPageException(reason)
            log.error(get_book_page_exception)
            continue
        log_one_page_book_rating(userid, page_html)
def log_one_page_book_rating(userid, page_html):
    '''
    Extract every rated book from one "collect" page and write it to the
    rating log as: userid, bookid, bookname, rating, bookpubinfo.

    :param userid: Douban user id
    :param page_html: HTML of one collection page
    '''
    # Process each book item on the fetched page
    soup = BeautifulSoup(page_html, 'lxml')
    for item in soup.find_all('li', class_='subject-item'):
        # Locate the rating tag
        book_rating_tag = item.find(
            lambda tag:
            cmp(tag.name, 'span') == 0  # a <span> tag
            and tag.has_attr('class')  # with a class attribute
            and tag['class'][0].startswith('rating'))  # class starts with "rating"
        # Only log books the user actually rated
        # Fields logged: bookid, bookname, rating, bookpubinfo
        if book_rating_tag is not None:
            bookid = item.a['href'].split('/')[4]
            bookname = item.find('div', class_='info').a['title']
            # Rating class looks like "rating4-t"; char 6 is the score digit
            rating = book_rating_tag['class'][0][6]
            bookpubinfo = item.find('div', class_='pub').string.strip()
            book_rating_log.info('%s, %s, %s, %s, %s', userid, bookid, bookname, rating, bookpubinfo)
            log.info('%s, %s, %s, %s, %s', userid, bookid, bookname, rating, bookpubinfo)
def spider_all_users(filename):
    '''
    Crawl every user id listed in ``filename`` (one id per line);
    a failure for an individual user is logged as a
    SpiderOneUserException and that user is skipped.

    :param filename: path to a text file of user ids, one per line
    '''
    with open(filename) as f:
        for line in f:
            try:
                spider_one_user_book_rating(line.strip())
                # Throttle between users to avoid being blocked
                random_sleep()
            except Exception, e:
                reason = line.strip()+' '+str(e)
                user_failed_exception = SpiderOneUserException(reason)
                log.error(user_failed_exception)
def random_sleep():
    """Sleep for a random 2-5 second interval to throttle requests."""
    sleep(random.randint(2, 4) + random.random())
if __name__ == '__main__':
    # Crawl the bundled sample user list when run as a script.
    filename = 'users_small235.csv'
    # Resolve the data file relative to this module's directory so the
    # script works regardless of the current working directory.
    package_dir = os.path.dirname(os.path.abspath(__file__))
    data_file = os.path.join(package_dir, filename)
    spider_all_users(data_file)
|
14,399 | 66d96c42c8b814fcf38f8fd18215e6e29efbc810 | """
Class: Stat232C
Project 3: Goal Inference
Name:Mingjia Yao
Date: May, 2020
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import copy
import math
class ValueIteration(object):
    """
    Tabular value iteration over dict-encoded MDPs.

    transitionTable: {state: {action: {next_state: probability}}}
    rewardTable:     {state: {action: {next_state: reward}}}
    valueTable:      {state: initial value} (mutated in place)
    convergenceTolerance: stop when the max value change is below this
    gamma: discount factor in [0, 1)

    Assumes every state has at least one action.
    """

    def __init__(self, transitionTable, rewardTable, valueTable, convergenceTolerance, gamma):
        self.transitionTable = transitionTable
        self.rewardTable = rewardTable
        self.valueTable = valueTable
        self.convergenceTolerance = convergenceTolerance
        self.gamma = gamma

    def __call__(self):
        """
        Run value iteration to convergence, then extract a policy.

        Returns [valueTable, policyTable], where policyTable maps each
        state to {best_action: probability of the most valuable
        successor under that action}.
        """
        ValueTable = self.valueTable
        tempValueTable = copy.copy(ValueTable)
        delta = 1
        while delta >= self.convergenceTolerance:
            delta = 0
            for s in ValueTable:
                v = ValueTable[s]
                actions = self.transitionTable[s]
                # BUG FIX: initialise the max with -inf, not 0 -- with the
                # negative step rewards used here, a 0 floor freezes every
                # state whose true value is negative at 0.
                best_q = float('-inf')
                for a in actions:
                    q_sa = 0
                    for ns, prob in actions[a].items():
                        q_sa += prob * (self.rewardTable[s][a][ns] + self.gamma * tempValueTable[ns])
                    best_q = max(best_q, q_sa)
                ValueTable[s] = best_q
                delta = max(delta, abs(v - ValueTable[s]))
            # Synchronous update: next sweep reads this sweep's results
            tempValueTable = copy.copy(ValueTable)

        policyTable = {}
        for s in ValueTable:
            actions = self.transitionTable[s]
            pi_s = (0, 0)
            # Same -inf fix as above so negative Q-values can win
            vs = float('-inf')
            for a in actions:
                q_sa = 0
                for ns, prob in actions[a].items():
                    q_sa += prob * (self.rewardTable[s][a][ns] + self.gamma * ValueTable[ns])
                if q_sa > vs:
                    pi_s = a
                    vs = q_sa
            # BUG FIX: inspect the successors of the *chosen* action
            # (the original read lib_a, i.e. the last action iterated).
            best_transitions = actions[pi_s]
            prob_ns = 0
            v_ns = float('-inf')
            for ns in best_transitions:
                if ValueTable[ns] > v_ns:
                    v_ns = ValueTable[ns]
                    prob_ns = best_transitions[ns]
            policyTable[s] = {pi_s: prob_ns}
        return [ValueTable, policyTable]
def visualizeValueTable(gridWidth, gridHeight, goalState, trapStates, valueTable):
    """
    Draw a gridworld with the goal shaded green, traps shaded black, and
    each cell annotated with its (rounded) state value; shows the plot.

    gridWidth, gridHeight: grid dimensions in cells
    goalState: (x, y) of the goal cell
    trapStates: iterable of (x, y) trap cells
    valueTable: {(x, y): value} mapping to annotate
    """
    # Shift by half a cell so grid lines fall between integer coordinates
    gridAdjust = .5
    gridScale = 1.5
    xs = np.linspace(-gridAdjust, gridWidth-gridAdjust, gridWidth+1)
    ys = np.linspace(-gridAdjust, gridHeight-gridAdjust, gridHeight+1)
    plt.rcParams["figure.figsize"] = [gridWidth*gridScale,gridHeight*gridScale]
    ax = plt.gca(frameon=False, xticks = range(gridWidth), yticks = range(gridHeight))

    #goal and trap coloring
    ax.add_patch(Rectangle((goalState[0]-gridAdjust, goalState[1]-gridAdjust), 1, 1, fill=True, color='green', alpha=.1))
    for (trapx, trapy) in trapStates:
        ax.add_patch(Rectangle((trapx-gridAdjust, trapy-gridAdjust), 1, 1, fill=True, color='black', alpha=.1))

    # grid lines
    for x in xs:
        plt.plot([x, x], [ys[0], ys[-1]], color = "black")
    for y in ys:
        plt.plot([xs[0], xs[-1]], [y, y], color = "black")

    #labeled values
    for (statex, statey), val in valueTable.items():
        plt.text(statex-.2, statey, str(round(val, 3)))

    plt.show()
def visualizePolicy(gridWidth, gridHeight, goalState, trapStates, policy):
    """
    Draw a gridworld with the goal shaded green, traps shaded black, and
    an arrow in each cell for its policy action (scaled by the action's
    probability); shows the plot.

    gridWidth, gridHeight: grid dimensions in cells
    goalState: (x, y) of the goal cell
    trapStates: iterable of (x, y) trap cells
    policy: {(x, y): {(dx, dy): probability}} mapping
    """
    #grid height/width
    gridAdjust = .5
    gridScale = 1.5
    arrowScale = .5
    xs = np.linspace(-gridAdjust, gridWidth-gridAdjust, gridWidth+1)
    ys = np.linspace(-gridAdjust, gridHeight-gridAdjust, gridHeight+1)
    plt.rcParams["figure.figsize"] = [gridWidth*gridScale,gridHeight*gridScale]
    ax = plt.gca(frameon=False, xticks = range(gridWidth), yticks = range(gridHeight))

    #goal and trap coloring
    ax.add_patch(Rectangle((goalState[0]-gridAdjust, goalState[1]-gridAdjust), 1, 1, fill=True, color='green', alpha=.1))
    for (trapx, trapy) in trapStates:
        ax.add_patch(Rectangle((trapx-gridAdjust, trapy-gridAdjust), 1, 1, fill=True, color='black', alpha=.1))

    # grid lines
    for x in xs:
        plt.plot([x, x], [ys[0], ys[-1]], color = "black")
    for y in ys:
        plt.plot([xs[0], xs[-1]], [y, y], color = "black")

    #labeled values
    # Arrow length and head size are scaled by the action probability
    for (statex, statey), actionDict in policy.items():
        for (optimalActionX, optimalActionY), actionProb in actionDict.items():
            plt.arrow(statex, statey, optimalActionX*actionProb*arrowScale, optimalActionY*actionProb*arrowScale, head_width=0.05*actionProb, head_length=0.1*actionProb)

    plt.show()
def viewDictionaryStructure(d, levels, indent=0):
    """
    Pretty-print a nested dictionary, labelling each depth with the
    corresponding name from ``levels`` and indenting one tab per level.
    """
    tabs = '\t' * indent
    for key, value in d.items():
        print("{}{}: {}".format(tabs, levels[indent], key))
        if isinstance(value, dict):
            viewDictionaryStructure(value, levels, indent + 1)
        else:
            print("{}\t{}: {}".format(tabs, levels[indent + 1], value))
def p_traj(traj, beta, gamma, transitTable, rewardTable, valueTable):
    """
    Return the running (cumulative) products of single-step transition
    weights along ``traj``; element k covers steps 0..k.
    """
    cumulative = []
    running = 1
    for current, following in zip(traj, traj[1:]):
        running *= p_stn_st(current, following, beta, gamma,
                            transitTable, rewardTable, valueTable)
        cumulative.append(running)
    return cumulative
def p_stn_st(st, stn, beta, gamma, transitTable, rewardTable, valueTable):
    """
    Weight of moving from state ``st`` to ``stn``: the sum over actions
    of P(stn | st, a) * pias(st, a, ...), or 0 if ``stn`` is unreachable.
    """
    return sum(
        next_states[stn] * pias(st, action, beta, gamma,
                                transitTable, rewardTable, valueTable)
        for action, next_states in transitTable[st].items()
        if stn in next_states)
def pias(st, at, beta, gamma, transitTable, rewardTable, valueTable):
    """Return log(beta * Q(st, at)) -- the soft action weight."""
    q_value = q(st, at, gamma, transitTable, rewardTable, valueTable)
    return math.log(beta * q_value)
def q(st, at, gamma, transitTable, rewardTable, valueTable):
    """
    One-step Q-value: expected reward plus discounted successor value
    for taking action ``at`` in state ``st``.
    """
    rewards = rewardTable[st][at]
    return sum(prob * (rewards[ns] + gamma * valueTable[ns])
               for ns, prob in transitTable[st][at].items())
def main():
gamma = .95
beta = .4
convergenceTolerance = 10e-7
transition = {(0, 0): {(1, 0): {(1, 0): 1},(0, 1): {(0, 1): 1},(-1, 0): {(0, 0): 1},(0, -1): {(0, 0): 1},(-1, 1): {(0, 0): 1},(1, -1): {(0, 0): 1},(1, 1): {(1, 1): 1},(-1, -1): {(0, 0): 1}},(0, 1): {(1, 0): {(1, 1): 1},(0, 1): {(0, 2): 1},(-1, 0): {(0, 1): 1},(0, -1): {(0, 0): 1},(-1, 1): {(0, 1): 1},(1, -1): {(1, 0): 1},(1, 1): {(1, 2): 1},(-1, -1): {(0, 1): 1}},(0, 2): {(1, 0): {(1, 2): 1},(0, 1): {(0, 3): 1},(-1, 0): {(0, 2): 1},(0, -1): {(0, 1): 1},(-1, 1): {(0, 2): 1},(1, -1): {(1, 1): 1},(1, 1): {(1, 3): 1},(-1, -1): {(0, 2): 1}},(0, 3): {(1, 0): {(1, 3): 1},(0, 1): {(0, 4): 1},(-1, 0): {(0, 3): 1},(0, -1): {(0, 2): 1},(-1, 1): {(0, 3): 1},(1, -1): {(1, 2): 1},(1, 1): {(1, 4): 1},(-1, -1): {(0, 3): 1}},(0, 4): {(1, 0): {(1, 4): 1},(0, 1): {(0, 5): 1},(-1, 0): {(0, 4): 1},(0, -1): {(0, 3): 1},(-1, 1): {(0, 4): 1},(1, -1): {(1, 3): 1},(1, 1): {(1, 5): 1},(-1, -1): {(0, 4): 1}},(0, 5): {(1, 0): {(1, 5): 1},(0, 1): {(0, 5): 1},(-1, 0): {(0, 5): 1},(0, -1): {(0, 4): 1},(-1, 1): {(0, 5): 1},(1, -1): {(1, 4): 1},(1, 1): {(0, 5): 1},(-1, -1): {(0, 5): 1}},(1, 0): {(1, 0): {(2, 0): 1},(0, 1): {(1, 1): 1},(-1, 0): {(0, 0): 1},(0, -1): {(1, 0): 1},(-1, 1): {(0, 1): 1},(1, -1): {(1, 0): 1},(1, 1): {(2, 1): 1},(-1, -1): {(1, 0): 1}},(1, 1): {(1, 0): {(2, 1): 1},(0, 1): {(1, 2): 1},(-1, 0): {(0, 1): 1},(0, -1): {(1, 0): 1},(-1, 1): {(0, 2): 1},(1, -1): {(2, 0): 1},(1, 1): {(2, 2): 1},(-1, -1): {(0, 0): 1}},(1, 2): {(1, 0): {(2, 2): 1},(0, 1): {(1, 3): 1},(-1, 0): {(0, 2): 1},(0, -1): {(1, 1): 1},(-1, 1): {(0, 3): 1},(1, -1): {(2, 1): 1},(1, 1): {(2, 3): 1},(-1, -1): {(0, 1): 1}},(1, 3): {(1, 0): {(2, 3): 1},(0, 1): {(1, 4): 1},(-1, 0): {(0, 3): 1},(0, -1): {(1, 2): 1},(-1, 1): {(0, 4): 1},(1, -1): {(2, 2): 1},(1, 1): {(2, 4): 1},(-1, -1): {(0, 2): 1}},(1, 4): {(1, 0): {(2, 4): 1},(0, 1): {(1, 5): 1},(-1, 0): {(0, 4): 1},(0, -1): {(1, 3): 1},(-1, 1): {(0, 5): 1},(1, -1): {(2, 3): 1},(1, 1): {(2, 5): 1},(-1, -1): {(0, 3): 1}},(1, 5): {(1, 0): {(2, 5): 1},(0, 1): {(1, 5): 
1},(-1, 0): {(0, 5): 1},(0, -1): {(1, 4): 1},(-1, 1): {(1, 5): 1},(1, -1): {(2, 4): 1},(1, 1): {(1, 5): 1},(-1, -1): {(0, 4): 1}},(2, 0): {(1, 0): {(3, 0): 1},(0, 1): {(2, 1): 1},(-1, 0): {(1, 0): 1},(0, -1): {(2, 0): 1},(-1, 1): {(1, 1): 1},(1, -1): {(2, 0): 1},(1, 1): {(3, 1): 1},(-1, -1): {(2, 0): 1}},(2, 1): {(1, 0): {(3, 1): 1},(0, 1): {(2, 2): 1},(-1, 0): {(1, 1): 1},(0, -1): {(2, 0): 1},(-1, 1): {(1, 2): 1},(1, -1): {(3, 0): 1},(1, 1): {(3, 2): 1},(-1, -1): {(1, 0): 1}},(2, 2): {(1, 0): {(3, 2): 1},(0, 1): {(2, 3): 1},(-1, 0): {(1, 2): 1},(0, -1): {(2, 1): 1},(-1, 1): {(1, 3): 1},(1, -1): {(3, 1): 1},(1, 1): {(3, 3): 1},(-1, -1): {(1, 1): 1}},(2, 3): {(1, 0): {(3, 3): 1},(0, 1): {(2, 4): 1},(-1, 0): {(1, 3): 1},(0, -1): {(2, 2): 1},(-1, 1): {(1, 4): 1},(1, -1): {(3, 2): 1},(1, 1): {(3, 4): 1},(-1, -1): {(1, 2): 1}},(2, 4): {(1, 0): {(3, 4): 1},(0, 1): {(2, 5): 1},(-1, 0): {(1, 4): 1},(0, -1): {(2, 3): 1},(-1, 1): {(1, 5): 1},(1, -1): {(3, 3): 1},(1, 1): {(3, 5): 1},(-1, -1): {(1, 3): 1}},(2, 5): {(1, 0): {(3, 5): 1},(0, 1): {(2, 5): 1},(-1, 0): {(1, 5): 1},(0, -1): {(2, 4): 1},(-1, 1): {(2, 5): 1},(1, -1): {(3, 4): 1},(1, 1): {(2, 5): 1},(-1, -1): {(1, 4): 1}},(3, 0): {(1, 0): {(4, 0): 1},(0, 1): {(3, 1): 1},(-1, 0): {(2, 0): 1},(0, -1): {(3, 0): 1},(-1, 1): {(2, 1): 1},(1, -1): {(3, 0): 1},(1, 1): {(4, 1): 1},(-1, -1): {(3, 0): 1}},(3, 1): {(1, 0): {(4, 1): 1},(0, 1): {(3, 2): 1},(-1, 0): {(2, 1): 1},(0, -1): {(3, 0): 1},(-1, 1): {(2, 2): 1},(1, -1): {(4, 0): 1},(1, 1): {(4, 2): 1},(-1, -1): {(2, 0): 1}},(3, 2): {(1, 0): {(4, 2): 1},(0, 1): {(3, 3): 1},(-1, 0): {(2, 2): 1},(0, -1): {(3, 1): 1},(-1, 1): {(2, 3): 1},(1, -1): {(4, 1): 1},(1, 1): {(4, 3): 1},(-1, -1): {(2, 1): 1}},(3, 3): {(1, 0): {(4, 3): 1},(0, 1): {(3, 4): 1},(-1, 0): {(2, 3): 1},(0, -1): {(3, 2): 1},(-1, 1): {(2, 4): 1},(1, -1): {(4, 2): 1},(1, 1): {(4, 4): 1},(-1, -1): {(2, 2): 1}},(3, 4): {(1, 0): {(4, 4): 1},(0, 1): {(3, 5): 1},(-1, 0): {(2, 4): 1},(0, -1): {(3, 3): 1},(-1, 1): {(2, 5): 
1},(1, -1): {(4, 3): 1},(1, 1): {(4, 5): 1},(-1, -1): {(2, 3): 1}},(3, 5): {(1, 0): {(4, 5): 1},(0, 1): {(3, 5): 1},(-1, 0): {(2, 5): 1},(0, -1): {(3, 4): 1},(-1, 1): {(3, 5): 1},(1, -1): {(4, 4): 1},(1, 1): {(3, 5): 1},(-1, -1): {(2, 4): 1}},(4, 0): {(1, 0): {(5, 0): 1},(0, 1): {(4, 1): 1},(-1, 0): {(3, 0): 1},(0, -1): {(4, 0): 1},(-1, 1): {(3, 1): 1},(1, -1): {(4, 0): 1},(1, 1): {(5, 1): 1},(-1, -1): {(4, 0): 1}},(4, 1): {(1, 0): {(5, 1): 1},(0, 1): {(4, 2): 1},(-1, 0): {(3, 1): 1},(0, -1): {(4, 0): 1},(-1, 1): {(3, 2): 1},(1, -1): {(5, 0): 1},(1, 1): {(5, 2): 1},(-1, -1): {(3, 0): 1}},(4, 2): {(1, 0): {(5, 2): 1},(0, 1): {(4, 3): 1},(-1, 0): {(3, 2): 1},(0, -1): {(4, 1): 1},(-1, 1): {(3, 3): 1},(1, -1): {(5, 1): 1},(1, 1): {(5, 3): 1},(-1, -1): {(3, 1): 1}},(4, 3): {(1, 0): {(5, 3): 1},(0, 1): {(4, 4): 1},(-1, 0): {(3, 3): 1},(0, -1): {(4, 2): 1},(-1, 1): {(3, 4): 1},(1, -1): {(5, 2): 1},(1, 1): {(5, 4): 1},(-1, -1): {(3, 2): 1}},(4, 4): {(1, 0): {(5, 4): 1},(0, 1): {(4, 5): 1},(-1, 0): {(3, 4): 1},(0, -1): {(4, 3): 1},(-1, 1): {(3, 5): 1},(1, -1): {(5, 3): 1},(1, 1): {(5, 5): 1},(-1, -1): {(3, 3): 1}},(4, 5): {(1, 0): {(5, 5): 1},(0, 1): {(4, 5): 1},(-1, 0): {(3, 5): 1},(0, -1): {(4, 4): 1},(-1, 1): {(4, 5): 1},(1, -1): {(5, 4): 1},(1, 1): {(4, 5): 1},(-1, -1): {(3, 4): 1}},(5, 0): {(1, 0): {(6, 0): 1},(0, 1): {(5, 1): 1},(-1, 0): {(4, 0): 1},(0, -1): {(5, 0): 1},(-1, 1): {(4, 1): 1},(1, -1): {(5, 0): 1},(1, 1): {(6, 1): 1},(-1, -1): {(5, 0): 1}},(5, 1): {(1, 0): {(6, 1): 1},(0, 1): {(5, 2): 1},(-1, 0): {(4, 1): 1},(0, -1): {(5, 0): 1},(-1, 1): {(4, 2): 1},(1, -1): {(6, 0): 1},(1, 1): {(6, 2): 1},(-1, -1): {(4, 0): 1}},(5, 2): {(1, 0): {(6, 2): 1},(0, 1): {(5, 3): 1},(-1, 0): {(4, 2): 1},(0, -1): {(5, 1): 1},(-1, 1): {(4, 3): 1},(1, -1): {(6, 1): 1},(1, 1): {(6, 3): 1},(-1, -1): {(4, 1): 1}},(5, 3): {(1, 0): {(6, 3): 1},(0, 1): {(5, 4): 1},(-1, 0): {(4, 3): 1},(0, -1): {(5, 2): 1},(-1, 1): {(4, 4): 1},(1, -1): {(6, 2): 1},(1, 1): {(6, 4): 1},(-1, -1): {(4, 2): 
1}},(5, 4): {(1, 0): {(6, 4): 1},(0, 1): {(5, 5): 1},(-1, 0): {(4, 4): 1},(0, -1): {(5, 3): 1},(-1, 1): {(4, 5): 1},(1, -1): {(6, 3): 1},(1, 1): {(6, 5): 1},(-1, -1): {(4, 3): 1}},(5, 5): {(1, 0): {(6, 5): 1},(0, 1): {(5, 5): 1},(-1, 0): {(4, 5): 1},(0, -1): {(5, 4): 1},(-1, 1): {(5, 5): 1},(1, -1): {(6, 4): 1},(1, 1): {(5, 5): 1},(-1, -1): {(4, 4): 1}},(6, 0): {(1, 0): {(6, 0): 1},(0, 1): {(6, 1): 1},(-1, 0): {(5, 0): 1},(0, -1): {(6, 0): 1},(-1, 1): {(5, 1): 1},(1, -1): {(6, 0): 1},(1, 1): {(6, 0): 1},(-1, -1): {(6, 0): 1}},(6, 1): {(1, 0): {(6, 1): 1},(0, 1): {(6, 2): 1},(-1, 0): {(5, 1): 1},(0, -1): {(6, 0): 1},(-1, 1): {(5, 2): 1},(1, -1): {(6, 1): 1},(1, 1): {(6, 1): 1},(-1, -1): {(5, 0): 1}},(6, 2): {(1, 0): {(6, 2): 1},(0, 1): {(6, 3): 1},(-1, 0): {(5, 2): 1},(0, -1): {(6, 1): 1},(-1, 1): {(5, 3): 1},(1, -1): {(6, 2): 1},(1, 1): {(6, 2): 1},(-1, -1): {(5, 1): 1}},(6, 3): {(1, 0): {(6, 3): 1},(0, 1): {(6, 4): 1},(-1, 0): {(5, 3): 1},(0, -1): {(6, 2): 1},(-1, 1): {(5, 4): 1},(1, -1): {(6, 3): 1},(1, 1): {(6, 3): 1},(-1, -1): {(5, 2): 1}},(6, 4): {(1, 0): {(6, 4): 1},(0, 1): {(6, 5): 1},(-1, 0): {(5, 4): 1},(0, -1): {(6, 3): 1},(-1, 1): {(5, 5): 1},(1, -1): {(6, 4): 1},(1, 1): {(6, 4): 1},(-1, -1): {(5, 3): 1}},(6, 5): {(1, 0): {(6, 5): 1},(0, 1): {(6, 5): 1},(-1, 0): {(5, 5): 1},(0, -1): {(6, 4): 1},(-1, 1): {(6, 5): 1},(1, -1): {(6, 5): 1},(1, 1): {(6, 5): 1},(-1, -1): {(5, 4): 1}}}
valueTable = {(0, 0): 0,(0, 1): 0,(0, 2): 0,(0, 3): 0,(0, 4): 0,(0, 5): 0,(1, 0): 0,(1, 1): 0,(1, 2): 0,(1, 3): 0,(1, 4): 0,(1, 5): 0,(2, 0): 0,(2, 1): 0,(2, 2): 0,(2, 3): 0,(2, 4): 0,(2, 5): 0,(3, 0): 0,(3, 1): 0,(3, 2): 0,(3, 3): 0,(3, 4): 0,(3, 5): 0,(4, 0): 0,(4, 1): 0,(4, 2): 0,(4, 3): 0,(4, 4): 0,(4, 5): 0,(5, 0): 0,(5, 1): 0,(5, 2): 0,(5, 3): 0,(5, 4): 0,(5, 5): 0,(6, 0): 0,(6, 1): 0,(6, 2): 0,(6, 3): 0,(6, 4): 0,(6, 5): 0}
#Observed Trajectories
trajectoryToGoalA = [(0,0), (1,1), (1,2), (2,3), (3,4), (4,4), (5,4), (6,4)]
trajectoryToGoalB = [(0,0), (1,1), (2,2), (2,3), (3,4), (4,3), (5,2), (6,1)]
trajectoryToGoalC = [(0,0), (0,1), (1,2), (1,3), (1,4), (1,5)]
# Environment 1: solid barrier wall occupying column x = 3, rows y = 0..3
def _grid_reward(goal, width=7, height=6,
                 barrier=((3, 0), (3, 1), (3, 2), (3, 3))):
    """Build the reward table for one solid-barrier gridworld environment.

    Replaces the previous hand-written literals: all three tables followed
    the same pattern and differed only in the goal cell, so they are now
    generated from one place.

    Returns a nested dict  state -> action -> {next_state: reward}  where
    state/next_state are (x, y) tuples and action is one of the eight
    (dx, dy) king moves, with rewards:
      * -1.0 / -sqrt(2) step cost for a successful cardinal / diagonal move,
      * -1 when the move is blocked by the grid boundary (agent stays put),
      * -100 for every action taken from a barrier cell,
      * +10 for any successful move taken from the goal cell.

    Args:
        goal: (x, y) goal cell whose successful moves are rewarded.
        width, height: grid dimensions (x in [0, width), y in [0, height)).
        barrier: cells from which every action is penalised with -100.
    """
    actions = [(1, 0), (0, 1), (-1, 0), (0, -1),
               (-1, 1), (1, -1), (1, 1), (-1, -1)]
    barrier = set(barrier)
    reward = {}
    for x in range(width):          # x-major order matches the old literals
        for y in range(height):
            state = (x, y)
            per_action = {}
            for dx, dy in actions:
                nx, ny = x + dx, y + dy
                moved = 0 <= nx < width and 0 <= ny < height
                nxt = (nx, ny) if moved else state
                if state in barrier:
                    r = -100        # barrier cells penalise every action
                elif not moved:
                    r = -1          # bumped into the boundary, stay in place
                elif state == goal:
                    r = 10          # any successful move out of the goal cell
                else:
                    r = -(dx * dx + dy * dy) ** 0.5   # -1.0 or -sqrt(2)
                per_action[(dx, dy)] = {nxt: r}
            reward[state] = per_action
    return reward


# Same barrier layout for all three environments; only the goal differs.
rewardA = _grid_reward((6, 4))
rewardB = _grid_reward((6, 1))
rewardC = _grid_reward((1, 5))
# Environment 2: barrier with a gap — the trap column at x=3 is opened at (3, 1)
def _gap_reward_table(goal, traps, width=7, height=6):
    """Build the reward table for the gapped-barrier gridworld.

    Returns a nested dict ``table[state][action] == {next_state: reward}``
    for a ``width`` x ``height`` grid with 8-connected moves, reproducing the
    rules encoded in the original hand-written literals:

    * a move whose target falls off the grid bounces back to ``state``
      (no clipping of individual coordinates) and costs -1;
    * an ordinary valid move costs -1.0 (cardinal) or -sqrt(2) (diagonal);
    * every action taken FROM a trap state is rewarded -100, regardless of
      whether the move is valid;
    * every valid action taken FROM the goal state is rewarded +10
      (a bounced move from the goal still costs -1).

    Rewards depend only on the source state, never on the target, which is
    why moving INTO a trap or the goal carries the ordinary step cost.
    """
    # Action order matches the original literals; irrelevant for dict
    # equality but kept so iteration order is identical too.
    actions = [(1, 0), (0, 1), (-1, 0), (0, -1),
               (-1, 1), (1, -1), (1, 1), (-1, -1)]
    diag_cost = -(2 ** 0.5)  # -1.4142135623730951, as in the literals
    table = {}
    for x in range(width):
        for y in range(height):
            state = (x, y)
            moves = {}
            for dx, dy in actions:
                nx, ny = x + dx, y + dy
                valid = 0 <= nx < width and 0 <= ny < height
                nxt = (nx, ny) if valid else state
                if state in traps:
                    reward = -100
                elif state == goal and valid:
                    reward = 10
                elif not valid:
                    reward = -1
                else:
                    reward = -1.0 if dx == 0 or dy == 0 else diag_cost
                moves[(dx, dy)] = {nxt: reward}
            table[state] = moves
    return table

# Environment 2 (barrier column x=3 with a gap at (3, 1)); goal A is (6, 4).
# Replaces the former ~10 KB generated literal with an equal dict.
rewardAGap = _gap_reward_table(goal=(6, 4), traps={(3, 0), (3, 2), (3, 3)})
def _gap_reward_table(goal, traps, width=7, height=6):
    """Build the reward table for the gapped-barrier gridworld.

    Returns a nested dict ``table[state][action] == {next_state: reward}``
    for a ``width`` x ``height`` grid with 8-connected moves, reproducing the
    rules encoded in the original hand-written literals:

    * a move whose target falls off the grid bounces back to ``state``
      (no clipping of individual coordinates) and costs -1;
    * an ordinary valid move costs -1.0 (cardinal) or -sqrt(2) (diagonal);
    * every action taken FROM a trap state is rewarded -100, regardless of
      whether the move is valid;
    * every valid action taken FROM the goal state is rewarded +10
      (a bounced move from the goal still costs -1).

    Rewards depend only on the source state, never on the target, which is
    why moving INTO a trap or the goal carries the ordinary step cost.
    """
    # Action order matches the original literals; irrelevant for dict
    # equality but kept so iteration order is identical too.
    actions = [(1, 0), (0, 1), (-1, 0), (0, -1),
               (-1, 1), (1, -1), (1, 1), (-1, -1)]
    diag_cost = -(2 ** 0.5)  # -1.4142135623730951, as in the literals
    table = {}
    for x in range(width):
        for y in range(height):
            state = (x, y)
            moves = {}
            for dx, dy in actions:
                nx, ny = x + dx, y + dy
                valid = 0 <= nx < width and 0 <= ny < height
                nxt = (nx, ny) if valid else state
                if state in traps:
                    reward = -100
                elif state == goal and valid:
                    reward = 10
                elif not valid:
                    reward = -1
                else:
                    reward = -1.0 if dx == 0 or dy == 0 else diag_cost
                moves[(dx, dy)] = {nxt: reward}
            table[state] = moves
    return table

# Environment 2 (barrier column x=3 with a gap at (3, 1)); goal B is (6, 1).
# Replaces the former ~10 KB generated literal with an equal dict.
rewardBGap = _gap_reward_table(goal=(6, 1), traps={(3, 0), (3, 2), (3, 3)})
def _gap_reward_table(goal, traps, width=7, height=6):
    """Build the reward table for the gapped-barrier gridworld.

    Returns a nested dict ``table[state][action] == {next_state: reward}``
    for a ``width`` x ``height`` grid with 8-connected moves, reproducing the
    rules encoded in the original hand-written literals:

    * a move whose target falls off the grid bounces back to ``state``
      (no clipping of individual coordinates) and costs -1;
    * an ordinary valid move costs -1.0 (cardinal) or -sqrt(2) (diagonal);
    * every action taken FROM a trap state is rewarded -100, regardless of
      whether the move is valid;
    * every valid action taken FROM the goal state is rewarded +10
      (a bounced move from the goal still costs -1).

    Rewards depend only on the source state, never on the target, which is
    why moving INTO a trap or the goal carries the ordinary step cost.
    """
    # Action order matches the original literals; irrelevant for dict
    # equality but kept so iteration order is identical too.
    actions = [(1, 0), (0, 1), (-1, 0), (0, -1),
               (-1, 1), (1, -1), (1, 1), (-1, -1)]
    diag_cost = -(2 ** 0.5)  # -1.4142135623730951, as in the literals
    table = {}
    for x in range(width):
        for y in range(height):
            state = (x, y)
            moves = {}
            for dx, dy in actions:
                nx, ny = x + dx, y + dy
                valid = 0 <= nx < width and 0 <= ny < height
                nxt = (nx, ny) if valid else state
                if state in traps:
                    reward = -100
                elif state == goal and valid:
                    reward = 10
                elif not valid:
                    reward = -1
                else:
                    reward = -1.0 if dx == 0 or dy == 0 else diag_cost
                moves[(dx, dy)] = {nxt: reward}
            table[state] = moves
    return table

# Environment 2 (barrier column x=3 with a gap at (3, 1)); goal C is (1, 5).
# Replaces the former ~10 KB generated literal with an equal dict.
rewardCGap = _gap_reward_table(goal=(1, 5), traps={(3, 0), (3, 2), (3, 3)})
# --- Environment 1 (full barrier), goal A at (6, 4) ---
# Solve the MDP for goal A, visualize value table and greedy policy, then
# plot how strongly the observed trajectory implies goal A at each step.
performValueIteration = ValueIteration(transition, rewardA, valueTable, convergenceTolerance, gamma)
optimalValuesA, policyTableA = performValueIteration()
visualizeValueTable(gridWidth=7, gridHeight=6, goalState=(6, 4), trapStates=[(3, 0), (3, 1), (3, 2), (3, 3)], valueTable=optimalValuesA)
visualizePolicy(gridWidth=7, gridHeight=6, goalState=(6, 4), trapStates=[(3, 0), (3, 1), (3, 2), (3, 3)], policy=policyTableA)
# Compute once instead of twice (was evaluated for both print and plot).
goalAProbabilities = p_traj(trajectoryToGoalA, beta, gamma, transition, rewardA, optimalValuesA)
print(goalAProbabilities)
plt.plot([2, 3, 4, 5, 6, 7, 8], goalAProbabilities)
plt.ylabel('Probability of goal A (proportional to)')  # fixed typo "Probabilty"
plt.xlabel('Time point')
plt.title('Goal A without gap')  # fixed typo "Gaol"
plt.show()
# --- Environment 2 (gapped barrier), goal A at (6, 4) ---
# Same analysis as above, but the trap column x=3 is open at (3, 1).
performValueIteration = ValueIteration(transition, rewardAGap, valueTable, convergenceTolerance, gamma)
optimalValuesAG, policyTableAG = performValueIteration()
visualizeValueTable(gridWidth=7, gridHeight=6, goalState=(6, 4), trapStates=[(3, 0), (3, 2), (3, 3)], valueTable=optimalValuesAG)
visualizePolicy(gridWidth=7, gridHeight=6, goalState=(6, 4), trapStates=[(3, 0), (3, 2), (3, 3)], policy=policyTableAG)
# Compute once instead of twice (was evaluated for both print and plot).
goalAGapProbabilities = p_traj(trajectoryToGoalA, beta, gamma, transition, rewardAGap, optimalValuesAG)
print(goalAGapProbabilities)
plt.plot([2, 3, 4, 5, 6, 7, 8], goalAGapProbabilities)
plt.ylabel('Probability of goal A Gap (proportional to)')  # fixed typo "Probabilty"
plt.xlabel('Time point')
plt.title('Goal A with gap')  # fixed typo "Gaol"
plt.show()
# --- Environment 1 (full barrier), goal B at (6, 1) ---
# Solve the MDP for goal B, visualize value table and greedy policy, then
# plot how strongly the observed trajectory implies goal B at each step.
performValueIteration = ValueIteration(transition, rewardB, valueTable, convergenceTolerance, gamma)
optimalValuesB, policyTableB = performValueIteration()
visualizeValueTable(gridWidth=7, gridHeight=6, goalState=(6, 1), trapStates=[(3, 0), (3, 1), (3, 2), (3, 3)], valueTable=optimalValuesB)
visualizePolicy(gridWidth=7, gridHeight=6, goalState=(6, 1), trapStates=[(3, 0), (3, 1), (3, 2), (3, 3)], policy=policyTableB)
# Compute once instead of twice (was evaluated for both print and plot).
goalBProbabilities = p_traj(trajectoryToGoalB, beta, gamma, transition, rewardB, optimalValuesB)
print(goalBProbabilities)
plt.plot([2, 3, 4, 5, 6, 7, 8], goalBProbabilities)
plt.ylabel('Probability of goal B (proportional to)')  # fixed typo "Probabilty"
plt.xlabel('Time point')
plt.title('Goal B without gap')  # fixed typo "Gaol"
plt.show()
# --- Goal B (with gap): re-solve under rewardBGap — trapStates again omits
# (3, 1) relative to the no-gap case — then visualize and plot the posterior.
performValueIteration = ValueIteration(transition, rewardBGap, valueTable, convergenceTolerance, gamma)
optimalValuesBG, policyTableBG = performValueIteration()
visualizeValueTable(gridWidth=7, gridHeight=6, goalState=(6, 1),
                    trapStates=[(3, 0), (3, 2), (3, 3)],
                    valueTable=optimalValuesBG)
visualizePolicy(gridWidth=7, gridHeight=6, goalState=(6, 1),
                trapStates=[(3, 0), (3, 2), (3, 3)],
                policy=policyTableBG)
# Evaluate p_traj once and reuse it for both the printout and the plot.
goalProbBG = p_traj(trajectoryToGoalB, beta, gamma, transition, rewardBGap, optimalValuesBG)
print(goalProbBG)
plt.plot([2, 3, 4, 5, 6, 7, 8], goalProbBG)
plt.ylabel('Probability of goal B Gap (proportional to)')  # typo fix: "Probabilty"
plt.xlabel('Time point')
plt.title('Goal B with gap')  # typo fix: "Gaol"
plt.show()
# --- Goal C (no gap): solve the MDP under rewardC (goal at (1, 5)), then
# visualize the values/policy and plot the goal posterior. Note the C
# trajectory is shorter, hence the 5-point time axis [2..6].
performValueIteration = ValueIteration(transition, rewardC, valueTable, convergenceTolerance, gamma)
optimalValuesC, policyTableC = performValueIteration()
visualizeValueTable(gridWidth=7, gridHeight=6, goalState=(1, 5),
                    trapStates=[(3, 0), (3, 1), (3, 2), (3, 3)],
                    valueTable=optimalValuesC)
visualizePolicy(gridWidth=7, gridHeight=6, goalState=(1, 5),
                trapStates=[(3, 0), (3, 1), (3, 2), (3, 3)],
                policy=policyTableC)
# Evaluate p_traj once and reuse it for both the printout and the plot.
goalProbC = p_traj(trajectoryToGoalC, beta, gamma, transition, rewardC, optimalValuesC)
print(goalProbC)
plt.plot([2, 3, 4, 5, 6], goalProbC)
plt.ylabel('Probability of goal C (proportional to)')  # typo fix: "Probabilty"
plt.xlabel('Time point')
plt.title('Goal C without gap')  # typo fix: "Gaol"
plt.show()
# --- Goal C (with gap): re-solve under rewardCGap — trapStates omits (3, 1)
# relative to the no-gap case — then visualize and plot the posterior.
performValueIteration = ValueIteration(transition, rewardCGap, valueTable, convergenceTolerance, gamma)
optimalValuesCG, policyTableCG = performValueIteration()
visualizeValueTable(gridWidth=7, gridHeight=6, goalState=(1, 5),
                    trapStates=[(3, 0), (3, 2), (3, 3)],
                    valueTable=optimalValuesCG)
visualizePolicy(gridWidth=7, gridHeight=6, goalState=(1, 5),
                trapStates=[(3, 0), (3, 2), (3, 3)],
                policy=policyTableCG)
# Evaluate p_traj once and reuse it for both the printout and the plot.
goalProbCG = p_traj(trajectoryToGoalC, beta, gamma, transition, rewardCGap, optimalValuesCG)
print(goalProbCG)
plt.plot([2, 3, 4, 5, 6], goalProbCG)
plt.ylabel('Probability of goal C Gap (proportional to)')  # typo fix: "Probabilty"
plt.xlabel('Time point')
plt.title('Goal C with gap')  # typo fix: "Gaol"
plt.show()
# Script entry point. The original line ended with a stray " |" (table/markup
# residue from extraction) that made the call a syntax error; removed.
if __name__ == '__main__':
    main()
# NOTE(review): the following text is hosting-page boilerplate that leaked
# into the file during extraction; commented out so the module stays valid:
# "Subsets and Splits"
# "No community queries yet"
# "The top public SQL queries from the community will appear here once available."